HTTP Client Integration
Use any HTTP client in any programming language to make direct requests to the Martian API. This is ideal for custom integrations, server-side applications, or languages without official SDKs.
Ensure you have your Martian API key from the Martian Dashboard before continuing.
Authentication
All requests to the Martian API require authentication using a bearer token in the Authorization header:
Authorization: Bearer MARTIAN_API_KEY
cURL
Chat Completions
# OpenAI-compatible chat completions endpoint.
# Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
curl https://api.withmartian.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer MARTIAN_API_KEY" \
-d '{
"model": "openai/gpt-4.1-nano",
"messages": [
{
"role": "user",
"content": "What is Olympus Mons?"
}
]
}'
Messages (Anthropic Format)
# Anthropic-compatible messages endpoint.
# Note: unlike chat completions, this format requires max_tokens.
curl https://api.withmartian.com/v1/messages \
-H "Content-Type: application/json" \
-H "Authorization: Bearer MARTIAN_API_KEY" \
-d '{
"model": "anthropic/claude-sonnet-4-20250514",
"max_tokens": 1024,
"messages": [
{
"role": "user",
"content": "What is Olympus Mons?"
}
]
}'
Python
Using the requests Library
import requests

# Chat completion request using the requests library.
url = "https://api.withmartian.com/v1/chat/completions"
headers = {
    "Content-Type": "application/json",
    # Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
    "Authorization": "Bearer MARTIAN_API_KEY"
}
data = {
    "model": "openai/gpt-4.1-nano",
    "messages": [
        {"role": "user", "content": "What is Olympus Mons?"}
    ]
}

# 60-second timeout per the Best Practices section below; requests has
# no default timeout and would otherwise block indefinitely.
response = requests.post(url, headers=headers, json=data, timeout=60)
# Surface 4xx/5xx errors instead of failing later on a missing key.
response.raise_for_status()
result = response.json()
print(result['choices'][0]['message']['content'])
Using httpx (Async)
import httpx

async def make_request():
    """Send one chat completion request and return the reply text."""
    request_headers = {
        "Content-Type": "application/json",
        # Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
        "Authorization": "Bearer MARTIAN_API_KEY",
    }
    payload = {
        "model": "openai/gpt-4.1-nano",
        "messages": [{"role": "user", "content": "Hello!"}],
    }
    # One client per call keeps the example self-contained; reuse a
    # single client across requests in production code.
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "https://api.withmartian.com/v1/chat/completions",
            headers=request_headers,
            json=payload,
            timeout=60.0,
        )
        body = response.json()
        return body['choices'][0]['message']['content']
JavaScript
Using fetch
// Chat completion request with the built-in fetch API.
// The API key is read from the environment — never commit keys to source.
const response = await fetch('https://api.withmartian.com/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${process.env.MARTIAN_API_KEY}`
  },
  body: JSON.stringify({
    model: 'openai/gpt-4.1-nano',
    messages: [
      { role: 'user', content: 'What is Olympus Mons?' }
    ]
  })
});

// fetch does NOT reject on HTTP error statuses (401, 429, 5xx) — without
// this check the code below would try to read choices from an error body.
if (!response.ok) {
  throw new Error(`Martian API error ${response.status}: ${await response.text()}`);
}

const data = await response.json();
console.log(data.choices[0].message.content);
Using axios
import axios from 'axios';

// Chat completion request with axios. Unlike fetch, axios rejects the
// promise on 4xx/5xx responses, so no explicit status check is needed.
const response = await axios.post(
  'https://api.withmartian.com/v1/chat/completions',
  {
    model: 'openai/gpt-4.1-nano',
    messages: [
      { role: 'user', content: 'Hello!' }
    ]
  },
  {
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${process.env.MARTIAN_API_KEY}`
    },
    // 60-second timeout (ms) per the Best Practices section below;
    // axios defaults to no timeout at all.
    timeout: 60000
  }
);

console.log(response.data.choices[0].message.content);
Other Languages
The Martian API follows standard REST principles and can be used with any HTTP client.
Go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

// main sends a chat completion request to the Martian API and prints
// the raw JSON response body.
func main() {
	url := "https://api.withmartian.com/v1/chat/completions"

	requestBody := map[string]interface{}{
		"model": "openai/gpt-4.1-nano",
		"messages": []map[string]string{
			{"role": "user", "content": "What is Olympus Mons?"},
		},
	}

	jsonData, err := json.Marshal(requestBody)
	if err != nil {
		log.Fatalf("marshal request: %v", err)
	}

	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		log.Fatalf("build request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
	req.Header.Set("Authorization", "Bearer MARTIAN_API_KEY")

	// 60-second timeout per the Best Practices section below; the
	// zero-value http.Client never times out.
	client := &http.Client{Timeout: 60 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		// Checking this error is essential: on a transport failure
		// resp is nil and resp.Body.Close() would panic.
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("read response: %v", err)
	}
	fmt.Println(string(body))
}
Ruby
require 'net/http'
require 'json'
require 'uri'

# POST a chat completion request and print the first choice's content.
endpoint = URI('https://api.withmartian.com/v1/chat/completions')

payload = {
  model: 'openai/gpt-4.1-nano',
  messages: [
    { role: 'user', content: 'What is Olympus Mons?' }
  ]
}

req = Net::HTTP::Post.new(endpoint)
req['Content-Type'] = 'application/json'
# Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
req['Authorization'] = "Bearer MARTIAN_API_KEY"
req.body = payload.to_json

res = Net::HTTP.start(endpoint.hostname, endpoint.port, use_ssl: true) do |http|
  http.request(req)
end

parsed = JSON.parse(res.body)
puts parsed['choices'][0]['message']['content']
PHP
<?php
// Chat completion request using the Guzzle HTTP client
// (install via: composer require guzzlehttp/guzzle).
require 'vendor/autoload.php';

use GuzzleHttp\Client;

$client = new Client();

// Guzzle throws a GuzzleHttp\Exception\ClientException / ServerException
// on 4xx/5xx responses, so a successful return means HTTP 2xx.
$response = $client->post('https://api.withmartian.com/v1/chat/completions', [
    'headers' => [
        'Content-Type' => 'application/json',
        // Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
        'Authorization' => 'Bearer MARTIAN_API_KEY'
    ],
    'json' => [
        'model' => 'openai/gpt-4.1-nano',
        'messages' => [
            ['role' => 'user', 'content' => 'What is Olympus Mons?']
        ]
    ]
]);

// Second argument `true` decodes the JSON body into an associative array.
$data = json_decode($response->getBody(), true);
echo $data['choices'][0]['message']['content'];
Java
import okhttp3.*;
import org.json.JSONObject;
import org.json.JSONArray;

/**
 * Minimal OkHttp example: sends a chat completion request to the
 * Martian API and prints the raw JSON response body.
 */
public class MartianClient {
    public static void main(String[] args) throws Exception {
        OkHttpClient client = new OkHttpClient();

        // Build {"model": ..., "messages": [{"role": "user", "content": ...}]}
        JSONObject requestBody = new JSONObject();
        requestBody.put("model", "openai/gpt-4.1-nano");

        JSONArray messages = new JSONArray();
        JSONObject message = new JSONObject();
        message.put("role", "user");
        message.put("content", "What is Olympus Mons?");
        messages.put(message);
        requestBody.put("messages", messages);

        RequestBody body = RequestBody.create(
            requestBody.toString(),
            MediaType.parse("application/json")
        );

        Request request = new Request.Builder()
            .url("https://api.withmartian.com/v1/chat/completions")
            // Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
            .addHeader("Authorization", "Bearer MARTIAN_API_KEY")
            .post(body)
            .build();

        // OkHttp responses must be closed or the underlying connection
        // leaks; try-with-resources guarantees the close.
        try (Response response = client.newCall(request).execute()) {
            System.out.println(response.body().string());
        }
    }
}
Rust
use reqwest;
use serde_json::json;

/// Sends a chat completion request to the Martian API and prints the
/// JSON response.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();

    let response = client
        .post("https://api.withmartian.com/v1/chat/completions")
        // .json() already sets Content-Type; the explicit header is kept
        // to match the other examples on this page.
        .header("Content-Type", "application/json")
        // Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
        .header("Authorization", "Bearer MARTIAN_API_KEY")
        .json(&json!({
            "model": "openai/gpt-4.1-nano",
            "messages": [
                {
                    "role": "user",
                    "content": "What is Olympus Mons?"
                }
            ]
        }))
        .send()
        .await?
        // Turn 4xx/5xx statuses into errors instead of attempting to
        // parse an error body as a successful completion.
        .error_for_status()?;

    let data = response.json::<serde_json::Value>().await?;
    println!("{}", data);
    Ok(())
}
.NET / C#
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;

/// <summary>
/// Sends a chat completion request to the Martian API and prints the
/// raw JSON response body.
/// </summary>
class Program
{
    static async Task Main()
    {
        using var client = new HttpClient
        {
            // 60-second timeout per the Best Practices section below
            // (HttpClient's default is 100 seconds).
            Timeout = TimeSpan.FromSeconds(60)
        };
        // Typed header avoids the format-validation pitfalls of adding
        // a raw "Bearer ..." string via DefaultRequestHeaders.Add.
        // Replace MARTIAN_API_KEY with your key from the Martian Dashboard.
        client.DefaultRequestHeaders.Authorization =
            new AuthenticationHeaderValue("Bearer", "MARTIAN_API_KEY");

        var request = new
        {
            model = "openai/gpt-4.1-nano",
            messages = new[]
            {
                new { role = "user", content = "What is Olympus Mons?" }
            }
        };

        var json = JsonSerializer.Serialize(request);
        var content = new StringContent(json, Encoding.UTF8, "application/json");

        var response = await client.PostAsync(
            "https://api.withmartian.com/v1/chat/completions",
            content
        );
        // Throw on 4xx/5xx instead of printing an error body as success.
        response.EnsureSuccessStatusCode();

        var result = await response.Content.ReadAsStringAsync();
        Console.WriteLine(result);
    }
}
Best Practices
Timeout Configuration
Always set appropriate timeouts for your HTTP requests:
import requests

# Fragment: assumes `url`, `headers`, and `data` are defined as in the
# requests example above.
response = requests.post(
    url,
    headers=headers,
    json=data,
    timeout=60  # 60 seconds
)
Error Handling
Always check response status codes and handle errors:
import requests

# Fragment: assumes `url`, `headers`, and `data` are defined as in the
# requests example above.
response = requests.post(url, headers=headers, json=data)

if response.status_code == 200:
    result = response.json()
    print(result['choices'][0]['message']['content'])
elif response.status_code == 401:
    # Invalid or missing bearer token.
    print("Authentication failed - check your API key")
elif response.status_code == 429:
    # Too many requests — back off before retrying (see Retry Logic below).
    print("Rate limit exceeded - retry with backoff")
elif response.status_code >= 500:
    # Transient server-side failure.
    print("Server error - retry later")
else:
    print(f"Error {response.status_code}: {response.text}")
Retry Logic
Implement exponential backoff for transient errors:
from tenacity import retry, stop_after_attempt, wait_exponential
import requests

# Retry up to 3 times, waiting exponentially between attempts
# (bounded between 4 and 10 seconds).
# NOTE(review): this decorator retries on *any* exception, including
# non-transient 4xx errors raised by raise_for_status(); consider a
# `retry=` predicate to limit retries to 429/5xx in production.
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10)
)
def make_request(url, headers, data):
    # raise_for_status() converts 4xx/5xx responses into exceptions so
    # tenacity treats them as failed attempts.
    response = requests.post(url, headers=headers, json=data)
    response.raise_for_status()
    return response.json()