🔌 OpenAI API & Integration

Build AI-powered applications

The OpenAI API

OpenAI's API gives you access to powerful models like GPT-4, GPT-3.5, DALL-E, and Whisper. Build chatbots, content generators, code assistants, and more. The examples below use the official Python SDK (version 1.0 or later).

🚀 Quick Start

1. Installation

pip install openai

2. Get API Key

Visit platform.openai.com/api-keys to create an API key. Store it in the OPENAI_API_KEY environment variable rather than hard-coding it into source files.

3. First API Call

from openai import OpenAI

# For quick testing only; prefer the OPENAI_API_KEY environment variable
client = OpenAI(api_key="sk-your-api-key-here")

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "Hello, how are you?"}
    ]
)

print(response.choices[0].message.content)

💬 Chat Completions

Basic Chat

response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain quantum computing"}
    ],
    temperature=0.7,  # 0-2; lower is more deterministic, higher more creative
    max_tokens=500    # upper bound on the length of the reply
)

print(response.choices[0].message.content)
print(f"Tokens used: {response.usage.total_tokens}")

Multi-turn Conversation

conversation = []

def chat(user_message):
    # Add user message
    conversation.append({"role": "user", "content": user_message})
    
    # Get response
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=conversation
    )
    
    # Add assistant response
    assistant_message = response.choices[0].message.content
    conversation.append({"role": "assistant", "content": assistant_message})
    
    return assistant_message

# Use it
print(chat("What's the capital of France?"))
print(chat("What's the population?"))  # Remembers context!
print(chat("Tell me a fact about it"))  # Still knows we're talking about Paris

Streaming Responses

stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a short story"}],
    stream=True  # Enable streaming
)

# Print the response as it arrives, token by token
for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="", flush=True)
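
If you also need the full text afterwards (to store it or append it to a conversation), collect the deltas as they arrive. A variant of the loop above (a stream can only be consumed once, so use one loop or the other):

full_reply = []
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta is not None:
        print(delta, end="", flush=True)
        full_reply.append(delta)

story = "".join(full_reply)  # the complete response, assembled from chunks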

🎨 DALL-E Image Generation

response = client.images.generate(
    model="dall-e-3",
    prompt="A serene lake at sunset with mountains, digital art",
    n=1,  # Number of images (DALL-E 3 accepts only 1 per request)
    size="1024x1024"  # DALL-E 3 also supports "1792x1024" and "1024x1792"
)

image_url = response.data[0].url
print(f"Image URL: {image_url}")

# Download image
import requests
from PIL import Image
from io import BytesIO

image_data = requests.get(image_url).content
image = Image.open(BytesIO(image_data))
image.save("generated_image.png")
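
To skip the separate download step, you can ask the API to return the image bytes directly with response_format="b64_json":

import base64

response = client.images.generate(
    model="dall-e-3",
    prompt="A serene lake at sunset with mountains, digital art",
    response_format="b64_json"  # image bytes inline instead of a URL
)

with open("generated_image.png", "wb") as f:
    f.write(base64.b64decode(response.data[0].b64_json))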

Image Variations

# Variations are a DALL-E 2 feature and expect a square PNG as input
response = client.images.create_variation(
    image=open("input_image.png", "rb"),
    n=3,  # Generate 3 variations
    size="512x512"
)

for i, img_data in enumerate(response.data):
    print(f"Variation {i+1}: {img_data.url}")

🎤 Whisper Speech-to-Text

with open("speech.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file
    )

print(transcript.text)

# With translation to English (reopen the file; it was consumed above)
with open("speech.mp3", "rb") as audio_file:
    translation = client.audio.translations.create(
        model="whisper-1",
        file=audio_file  # Source audio can be in any language
    )

print(translation.text)  # Always English
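
Whisper can also return per-segment timestamps via response_format="verbose_json", handy for subtitles. A sketch (field names follow the verbose response shape; adjust if your SDK version differs):

with open("speech.mp3", "rb") as audio_file:
    verbose = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
        response_format="verbose_json"  # adds duration, language, and segments
    )

for seg in verbose.segments:
    print(f"[{seg.start:.1f}s - {seg.end:.1f}s] {seg.text}")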

📝 Embeddings

def get_embedding(text):
    response = client.embeddings.create(
        model="text-embedding-ada-002",
        input=text
    )
    return response.data[0].embedding

# Get embeddings
text1 = "I love programming"
text2 = "Coding is awesome"
text3 = "I like pizza"

emb1 = get_embedding(text1)
emb2 = get_embedding(text2)
emb3 = get_embedding(text3)

# Calculate similarity
from numpy import dot
from numpy.linalg import norm

def cosine_similarity(a, b):
    return dot(a, b) / (norm(a) * norm(b))

print(cosine_similarity(emb1, emb2))  # Higher: similar meaning
print(cosine_similarity(emb1, emb3))  # Lower: different topic
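
This is the core of semantic search: embed your documents once, embed the query, and rank by similarity. A minimal sketch reusing the helpers above:

documents = ["I love programming", "Coding is awesome", "I like pizza"]
doc_embeddings = [(d, get_embedding(d)) for d in documents]

query_emb = get_embedding("software development")

# Rank documents by cosine similarity to the query
for doc, emb in sorted(doc_embeddings,
                       key=lambda pair: cosine_similarity(query_emb, pair[1]),
                       reverse=True):
    print(f"{cosine_similarity(query_emb, emb):.3f}  {doc}")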

🛠️ Function Calling

Describe the functions the model may call as "tools"; when the model wants one, it returns the name and JSON arguments instead of plain text.

import json

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name, e.g. San Francisco"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                "required": ["location"]
            }
        }
    }
]

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
    tools=tools,
    tool_choice="auto"
)

# Check whether the model requested a tool call
message = response.choices[0].message
if message.tool_calls:
    tool_call = message.tool_calls[0]
    function_name = tool_call.function.name
    function_args = json.loads(tool_call.function.arguments)

    print(f"Function: {function_name}")
    print(f"Arguments: {function_args}")
    # {"location": "Tokyo", "unit": "celsius"}

💰 Cost Management

Pricing (as of 2024)

Model           Input (per 1K tokens)   Output (per 1K tokens)
GPT-3.5 Turbo   $0.0005                 $0.0015
GPT-4           $0.03                   $0.06
GPT-4 Turbo     $0.01                   $0.03

Token Counting

import tiktoken

def count_tokens(text, model="gpt-4"):
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))

text = "Hello, how are you doing today?"
tokens = count_tokens(text)
print(f"Tokens: {tokens}")  # ~8 tokens

# Estimate cost (GPT-4 rates from the table above)
input_tokens = 1000
output_tokens = 500
cost = (input_tokens * 0.03 + output_tokens * 0.06) / 1000
print(f"Estimated cost: ${cost:.4f}")  # $0.0600

Reduce Costs

  • Use the cheapest model that meets your quality bar (GPT-3.5 Turbo handles many simple tasks)
  • Cap output length with max_tokens
  • Keep prompts short and trim conversation history
  • Cache responses to repeated queries
  • Count tokens before sending (see above) to catch oversized requests

🔒 Best Practices

Security

  • Never commit API keys to Git
  • Use environment variables
  • Rotate keys regularly
  • Set usage limits
  • Validate user inputs (see the sketch below)
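
For input validation, even a simple emptiness and length check before each API call prevents empty requests and runaway bills. A minimal sketch (the limit is illustrative):

MAX_PROMPT_CHARS = 4000  # illustrative limit; tune to your use case

def safe_prompt(user_input: str) -> str:
    text = user_input.strip()
    if not text:
        raise ValueError("Empty prompt")
    if len(text) > MAX_PROMPT_CHARS:
        raise ValueError(f"Prompt exceeds {MAX_PROMPT_CHARS} characters")
    return text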

Error Handling

  • Implement retry logic
  • Handle rate limits
  • Catch API errors
  • Log failures
  • Provide fallbacks

Combining both: key loading from the environment plus retry with exponential backoff (using the tenacity library):

import os
import openai
from openai import OpenAI
from tenacity import retry, stop_after_attempt, wait_exponential

# Secure API key loading; never hard-code the key
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Retry with exponential backoff on transient failures
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10)
)
def call_openai_with_retry(prompt):
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            timeout=30
        )
        return response.choices[0].message.content
    except openai.RateLimitError:
        print("Rate limit hit, retrying...")
        raise  # re-raise so tenacity retries
    except openai.APIError as e:
        print(f"API error: {e}")
        raise
    except Exception as e:
        print(f"Unexpected error: {e}")
        return "Sorry, something went wrong."  # graceful fallback

🎯 Key Takeaways