Examples

Explore practical examples and code snippets to jumpstart your Martian API integration.

Quick Examples

Basic Chat Completion

import openai

# Point the standard OpenAI SDK at the Martian gateway.
client = openai.OpenAI(
    base_url="https://api.withmartian.com/v1",
    api_key=MARTIAN_API_KEY
)

# One system prompt plus a single user turn.
conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain quantum computing in simple terms."}
]

completion = client.chat.completions.create(
    model="openai/gpt-4.1-nano",
    messages=conversation
)

print(completion.choices[0].message.content)

Advanced Examples

Streaming with Error Handling

import openai
from tenacity import retry, stop_after_attempt, wait_exponential

# Martian exposes an OpenAI-compatible endpoint, so the stock SDK works.
client = openai.OpenAI(
    base_url="https://api.withmartian.com/v1",
    api_key=MARTIAN_API_KEY  # assumes MARTIAN_API_KEY was defined/loaded earlier
)

@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10)
)
def _create_stream(messages):
    """Create the streaming completion; retried with exponential backoff.

    The retry must wrap this plain function, not the generator below:
    decorating a generator function with tenacity's @retry is a no-op,
    because calling it merely constructs the generator object — the API
    call (and any exception) happens later, during iteration, outside
    the retry wrapper.
    """
    return client.chat.completions.create(
        model="openai/gpt-4.1-nano:cheap",
        messages=messages,
        stream=True,
        temperature=0.7,
        max_tokens=1000
    )

def stream_chat_completion(messages):
    """Yield text deltas from a streaming chat completion.

    Args:
        messages: list of chat message dicts ({"role": ..., "content": ...}).

    Yields:
        str: each non-empty content delta as it arrives.

    Raises:
        Exception: re-raised after logging if the stream creation
        (post-retries) or iteration fails.
    """
    try:
        stream = _create_stream(messages)
        for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    except Exception as e:
        print(f"Error: {e}")
        raise

# Drive the generator, printing each delta as it arrives.
messages = [{"role": "user", "content": "Write a short story"}]
for text in stream_chat_completion(messages):
    # flush=True so partial output is visible immediately while streaming
    print(text, end="", flush=True)

Function Calling Example

import json
import openai

# OpenAI-compatible client pointed at the Martian gateway.
client = openai.OpenAI(
    base_url="https://api.withmartian.com/v1",
    api_key=MARTIAN_API_KEY  # assumes MARTIAN_API_KEY was defined/loaded earlier
)

# Define available functions
def get_weather(location: str, unit: str = "fahrenheit"):
    """Get the current weather for a location"""
    # Stub implementation — a real integration would query a weather service.
    report = dict(
        location=location,
        temperature=72,
        unit=unit,
        forecast="sunny",
    )
    return report

# Define the function schema
# Tool declaration in the OpenAI "tools" format: the model sees this
# JSON Schema and decides when/how to call get_weather.
functions = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                # "unit" is optional — the Python function defaults it.
                "required": ["location"]
            }
        }
    }
]

# Make the API call
response = client.chat.completions.create(
    model="openai/gpt-4.1-nano",
    messages=[
        {"role": "user", "content": "What's the weather like in Boston?"}
    ],
    tools=functions,
    tool_choice="auto"  # let the model decide whether to invoke the tool
)

# Process function calls
# When the model wants a tool run, the message carries tool_calls
# instead of (or alongside) plain text content.
message = response.choices[0].message
if message.tool_calls:
    for tool_call in message.tool_calls:
        if tool_call.function.name == "get_weather":
            # Arguments arrive as a JSON string; decode before dispatching.
            args = json.loads(tool_call.function.arguments)
            result = get_weather(**args)
            print(f"Weather in {args['location']}: {result}")

Use Case Examples

Content Generation

# Blog post generator with SEO optimization
seo_prompt = [
    {
        "role": "system", 
        "content": "You are an SEO-optimized content writer."
    },
    {
        "role": "user",
        "content": "Write a 500-word blog post about sustainable living"
    }
]

response = client.chat.completions.create(
    model="openai/gpt-4.1-nano:cheap",
    messages=seo_prompt,
    temperature=0.7
)

Code Generation

# Generate unit tests for a function
# The code under test is embedded verbatim in the prompt string.
response = client.chat.completions.create(
    model="anthropic/claude-sonnet-4-20250514",
    messages=[
        {
            "role": "user",
            "content": """Generate comprehensive unit tests for this function:
            
def calculate_discount(price, discount_percent):
    if discount_percent < 0 or discount_percent > 100:
        raise ValueError("Discount must be between 0 and 100")
    return price * (1 - discount_percent / 100)
"""
        }
    ],
    max_tokens=2000  # generous budget: test suites can run long
)

Data Analysis

# Analyze CSV data and generate insights
# NOTE(review): assumes `csv_data` (a string of CSV text) was defined
# earlier — running this snippet alone raises NameError.
response = client.chat.completions.create(
    model="google/gemini-2.5-flash",
    messages=[
        {
            "role": "user",
            "content": f"""Analyze this sales data and provide insights:
            
{csv_data}

Please identify:
1. Top performing products
2. Sales trends
3. Recommendations for improvement
"""
        }
    ]
)

Integration Patterns

Retry Logic

from tenacity import retry, stop_after_attempt, wait_exponential

@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10)
)
def call_martian_api(messages):
    """Call the chat completions endpoint with automatic retries.

    Makes up to 3 attempts, waiting 4-10 seconds (doubling) between tries.
    Assumes `client` is an OpenAI-compatible client defined earlier.
    """
    return client.chat.completions.create(
        model="openai/gpt-4.1-nano:cheap",
        messages=messages
    )

Response Caching

from functools import lru_cache
import hashlib
import json

@lru_cache(maxsize=100)
def cached_completion(messages_json):
    """Return a chat completion for *messages_json*, a canonical JSON string.

    The JSON text itself is the cache key — str is hashable, so no separate
    digest is needed, and the original messages can be recovered with
    json.loads. (Passing a sha256 hex digest here, as the previous version
    did, breaks json.loads: a digest is not valid JSON and the messages
    cannot be reconstructed from it.)
    """
    return client.chat.completions.create(
        model="openai/gpt-4.1-nano",
        messages=json.loads(messages_json)
    )

def get_completion_cached(messages):
    """Fetch a completion, reusing cached results for identical messages.

    Args:
        messages: list of chat message dicts.
    """
    # sort_keys yields a canonical serialization so semantically equal
    # message lists map to the same cache entry.
    return cached_completion(json.dumps(messages, sort_keys=True))

Next Steps

Explore API Endpoints

Learn about chat completions, messages, and model listing endpoints.

Read more

View Available Models

Browse 200+ AI models from leading providers with real-time pricing.

Read more

Join Discord Community

Connect with other developers and get real-time help from our team.

Read more