🤖 Project: Build a ChatGPT Clone

Project Overview

Difficulty: Intermediate

Goal: Build a fully functional ChatGPT-like web application

Tech Stack: Python, OpenAI API, Streamlit (LangChain listed as optional — the code in this tutorial does not use it)

Time Required: 3-4 hours

What You'll Build: A conversational AI with chat history, streaming responses, and customizable settings

Features We'll Implement

💬 Chat Interface

Clean UI with message bubbles, like ChatGPT

📝 Conversation History

Remember context across messages

⚡ Streaming Responses

Display text as it's generated

⚙️ Settings Panel

Adjust temperature, model, system prompt

💾 Save Chats

Export conversations

🎨 Custom Personas

Switch between AI personalities

Step 1: Setup

# Create project directory
mkdir chatgpt-clone
cd chatgpt-clone

# Install dependencies
# FIX: tiktoken added — Step 5's token counter imports it but the original
# install line omitted it.
pip install openai streamlit python-dotenv langchain tiktoken

# Create .env file for API key
# NOTE: add .env to .gitignore so the key is never committed
echo "OPENAI_API_KEY=your-api-key-here" > .env

# Project structure (reference only — not shell commands)
chatgpt-clone/
├── app.py              # Main Streamlit app
├── .env                # API keys
├── utils.py            # Helper functions
└── requirements.txt    # Dependencies

Step 2: Basic Chat Application

# app.py - Simple version
"""Minimal ChatGPT-style chat app: persistent history + streamed replies."""
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

# Read OPENAI_API_KEY from .env into the environment, then build the client.
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Page config
st.set_page_config(
    page_title="ChatGPT Clone",
    page_icon="🤖",
    layout="wide",
)

st.title("🤖 ChatGPT Clone")

# Session state survives Streamlit's top-to-bottom reruns, so the
# conversation accumulates here across user interactions.
st.session_state.setdefault("messages", [])

# Re-render the full transcript on every rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Handle a newly submitted user message (chat_input returns None otherwise).
user_text = st.chat_input("What would you like to know?")
if user_text:
    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.markdown(user_text)

    # Generate the assistant's reply.
    with st.chat_message("assistant"):
        placeholder = st.empty()
        reply = ""

        # stream=True yields incremental chunks; repaint the placeholder
        # (with a cursor glyph) as text arrives.
        stream = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=st.session_state.messages,
            stream=True,
        )
        for chunk in stream:
            piece = chunk.choices[0].delta.content
            if piece:
                reply += piece
                placeholder.markdown(reply + "▌")

        placeholder.markdown(reply)

    # Persist the assistant's turn for the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": reply})

💡 Run with: streamlit run app.py

Step 3: Add Settings Sidebar

# Enhanced app.py with settings
"""ChatGPT clone with a settings sidebar (model, temperature, personas)."""
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

st.set_page_config(page_title="ChatGPT Clone", page_icon="🤖", layout="wide")

# Seed the system-prompt widget state exactly once; the text_area below is
# bound to this key, so persona buttons can update it and have it stick.
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = "You are a helpful AI assistant."


def _set_persona(text):
    """Persona-button callback: runs before the rerun, so the bound
    text_area shows the new value immediately."""
    st.session_state.system_prompt = text


# Sidebar for settings
with st.sidebar:
    st.title("⚙️ Settings")

    # Model selection
    model = st.selectbox(
        "Model",
        ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo-preview"],
        index=0
    )

    # Temperature slider
    temperature = st.slider(
        "Temperature",
        min_value=0.0,
        max_value=2.0,
        value=0.7,
        step=0.1,
        help="Higher = more creative"
    )

    # Max tokens
    max_tokens = st.slider(
        "Max Response Length",
        min_value=100,
        max_value=4000,
        value=1000,
        step=100
    )

    # System prompt — bound to session state via key (no `value=` arg;
    # Streamlit warns when both are supplied).
    st.subheader("System Prompt")
    system_prompt = st.text_area(
        "Set AI behavior",
        key="system_prompt",
        height=100
    )

    # Persona presets.
    # BUG FIX: the original assigned to the local `system_prompt` variable
    # *after* the text_area had rendered; the value was discarded on the
    # automatic rerun, so the buttons did nothing. on_click callbacks that
    # write the widget's session-state key persist across reruns.
    st.subheader("Quick Personas")
    st.button("🧑‍💼 Professional", on_click=_set_persona,
              args=("You are a professional business consultant.",))
    st.button("🎨 Creative", on_click=_set_persona,
              args=("You are a creative writer and artist.",))
    st.button("💻 Coder", on_click=_set_persona,
              args=("You are an expert programmer.",))
    st.button("🎓 Teacher", on_click=_set_persona,
              args=("You are a patient teacher.",))

    st.divider()

    # Clear chat button
    if st.button("🗑️ Clear Chat", use_container_width=True):
        st.session_state.messages = []
        st.rerun()

# Initialize conversation history (system prompt is injected per request,
# never stored in the history itself).
if "messages" not in st.session_state:
    st.session_state.messages = []

# Main chat interface
st.title("🤖 ChatGPT Clone")
st.caption(f"Using {model} with temperature {temperature}")

# Display messages
for message in st.session_state.messages:
    if message["role"] != "system":  # Don't show system prompt
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Ask me anything..."):
    # Add user message
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepend the (possibly persona-set) system prompt for this request.
    messages_with_system = [
        {"role": "system", "content": system_prompt}
    ] + st.session_state.messages

    # Generate response
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        try:
            for response in client.chat.completions.create(
                model=model,
                messages=messages_with_system,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=True,
            ):
                if response.choices[0].delta.content:
                    full_response += response.choices[0].delta.content
                    message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})

        except Exception as e:
            # ROBUSTNESS FIX: drop the just-added user message so a failed
            # request doesn't leave a dangling, unanswered turn in history.
            st.session_state.messages.pop()
            st.error(f"Error: {str(e)}")
            st.info("Check your API key and internet connection.")

Step 4: Add Chat Export Feature

# Add to sidebar (in app.py)
import json
from datetime import datetime

with st.sidebar:
    # ... existing sidebar code ...

    st.divider()
    st.subheader("💾 Export Chat")

    # BUG FIX: the original nested st.download_button inside an
    # `if st.button(...)` branch. Streamlit reruns the script on every
    # click, so the download button vanished before it could be used.
    # Render the download buttons unconditionally with the data prepared
    # up front, and disable them while there is nothing to export.
    have_messages = bool(st.session_state.get("messages"))
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Export as JSON (includes run settings for reproducibility).
    chat_data = {
        "timestamp": datetime.now().isoformat(),
        "model": model,
        "temperature": temperature,
        "messages": st.session_state.get("messages", [])
    }
    st.download_button(
        label="Export as JSON",
        data=json.dumps(chat_data, indent=2),
        file_name=f"chat_{stamp}.json",
        mime="application/json",
        disabled=not have_messages,
        use_container_width=True
    )

    # Export as Markdown (human-readable transcript).
    md_content = f"# Chat Export\n\n"
    md_content += f"**Date:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
    md_content += f"**Model:** {model}\n\n"
    md_content += "---\n\n"
    for msg in st.session_state.get("messages", []):
        role = "👤 User" if msg["role"] == "user" else "🤖 Assistant"
        md_content += f"### {role}\n\n{msg['content']}\n\n"

    st.download_button(
        label="Export as Markdown",
        data=md_content,
        file_name=f"chat_{stamp}.md",
        mime="text/markdown",
        disabled=not have_messages,
        use_container_width=True
    )

Step 5: Add Advanced Features

Token Counter

# utils.py
import tiktoken

def count_tokens(text, model="gpt-3.5-turbo"):
    """Count tokens in *text* using the tokenizer for *model*.

    ROBUSTNESS FIX: tiktoken.encoding_for_model raises KeyError for model
    names it does not recognize (new or fine-tuned models). Fall back to
    the cl100k_base encoding instead of crashing the usage-stats sidebar.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))

def estimate_cost(tokens, model="gpt-3.5-turbo", *, kind="input"):
    """Estimate API cost in USD for *tokens* tokens.

    Args:
        tokens: number of tokens to price.
        model: model name; unknown names fall back to gpt-3.5-turbo rates.
        kind: which rate to apply — "input" (prompt, the default, matching
            the original behavior) or "output" (completion). Keyword-only
            so existing callers are unaffected.

    Returns:
        Estimated cost in dollars (float).

    Raises:
        KeyError: if *kind* is neither "input" nor "output".
    """
    # Prices per 1K tokens (as of 2025)
    prices = {
        "gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015},
        "gpt-4": {"input": 0.03, "output": 0.06},
        "gpt-4-turbo-preview": {"input": 0.01, "output": 0.03}
    }

    # Unknown models deliberately fall back to the cheapest tier rather
    # than raising — this keeps the stats sidebar working for new models.
    price = prices.get(model, prices["gpt-3.5-turbo"])
    return (tokens / 1000) * price[kind]

# In app.py, add to sidebar:
with st.sidebar:
    st.divider()
    st.subheader("📊 Usage Stats")

    # Sum tokens across the whole transcript; that total drives the
    # cost estimate shown below.
    token_total = 0
    for msg in st.session_state.messages:
        token_total += count_tokens(msg["content"])
    cost = estimate_cost(token_total, model)

    st.metric("Total Tokens", f"{token_total:,}")
    st.metric("Estimated Cost", f"${cost:.4f}")

Conversation Templates

# Add to sidebar
st.subheader("🎯 Quick Templates")

templates = {
    "Code Review": "Review this code and suggest improvements:\n\n```python\n# Paste code here\n```",
    "Explain Like I'm 5": "Explain the following concept in simple terms:\n\n",
    "Brainstorm": "Help me brainstorm ideas for:\n\n",
    "Translate": "Translate the following to [language]:\n\n",
    "Summarize": "Summarize this text in 3 bullet points:\n\n"
}

selected_template = st.selectbox("Choose template", ["None"] + list(templates.keys()))

if selected_template != "None":
    if st.button("Use Template"):
        st.session_state.template_text = templates[selected_template]

# In main chat area:
# BUG FIX: st.chat_input() has no `value` parameter, so the original code
# raised TypeError at runtime. chat_input cannot be pre-filled; instead,
# show the pending template and prepend it to the user's next submission.
if "template_text" in st.session_state:
    st.info(f"Template active — your next message will be prefixed with:\n\n{st.session_state.template_text}")
prompt = st.chat_input("Ask me anything...")
if prompt and "template_text" in st.session_state:
    prompt = st.session_state.pop("template_text") + prompt

Step 6: Add File Upload Support

# Add file upload to sidebar
with st.sidebar:
    st.divider()
    st.subheader("📎 Upload Files")

    uploaded_file = st.file_uploader(
        "Upload text file",
        type=["txt", "md", "py", "js", "html", "css"]
    )

    if uploaded_file:
        # ROBUSTNESS FIX: a non-UTF-8 file (e.g. Latin-1 text) made the
        # original raise UnicodeDecodeError and crash the app. Replace
        # undecodable bytes so the analysis can still proceed.
        file_content = uploaded_file.read().decode("utf-8", errors="replace")

        if st.button("📖 Analyze File"):
            # Stash the prompt in session state; the main chat area picks
            # it up after the rerun.
            prompt = f"Please analyze this file:\n\n```\n{file_content}\n```"
            st.session_state.file_prompt = prompt
            st.rerun()

# In main app, check for file prompt:
if "file_prompt" in st.session_state:
    prompt = st.session_state.file_prompt
    del st.session_state.file_prompt
    # Process as normal chat...

Complete Project Code

# Full production-ready app.py
"""ChatGPT clone: streaming chat with model settings, personas, and export."""
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
import json
from datetime import datetime

# Load OPENAI_API_KEY from .env and build the API client once per run.
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Page configuration
st.set_page_config(
    page_title="ChatGPT Clone",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"
)

# NOTE: the original injected an *empty* custom-CSS string via
# st.markdown(..., unsafe_allow_html=True); that was a no-op and has been
# removed. Add real CSS there if custom styling is needed.

# Seed the system-prompt widget state once so persona buttons can update it.
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = "You are a helpful AI assistant."


def _set_persona(text):
    """Persona-button callback: runs before the rerun, so the bound
    text_area picks the new value up immediately."""
    st.session_state.system_prompt = text


# Sidebar
with st.sidebar:
    st.title("⚙️ Settings")

    model = st.selectbox(
        "Model",
        ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo-preview"],
        help="Choose the AI model"
    )

    temperature = st.slider("Temperature", 0.0, 2.0, 0.7, 0.1)
    max_tokens = st.slider("Max Tokens", 100, 4000, 1000, 100)

    # Bound to st.session_state.system_prompt via key (no `value=` arg;
    # Streamlit warns when both are supplied).
    system_prompt = st.text_area(
        "System Prompt",
        key="system_prompt",
        height=100
    )

    # Quick personas.
    # BUG FIX: the original assigned to the local `system_prompt` variable
    # after the text_area had rendered, so the persona was silently lost
    # on the next rerun. on_click callbacks writing the widget key persist.
    st.subheader("Quick Personas")
    col1, col2 = st.columns(2)
    with col1:
        st.button("Professional", use_container_width=True,
                  on_click=_set_persona,
                  args=("You are a professional consultant.",))
        st.button("Creative", use_container_width=True,
                  on_click=_set_persona,
                  args=("You are a creative writer.",))
    with col2:
        st.button("Coder", use_container_width=True,
                  on_click=_set_persona,
                  args=("You are an expert programmer.",))
        st.button("Teacher", use_container_width=True,
                  on_click=_set_persona,
                  args=("You are a patient teacher.",))

    st.divider()

    if st.button("🗑️ Clear Chat", use_container_width=True):
        st.session_state.messages = []
        st.rerun()

    # Export — only shown once there is something to download.
    st.divider()
    if st.session_state.get("messages"):
        export_data = json.dumps({
            "timestamp": datetime.now().isoformat(),
            "messages": st.session_state.messages
        }, indent=2)

        st.download_button(
            "💾 Export Chat",
            export_data,
            f"chat_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
            "application/json",
            use_container_width=True
        )

# Initialize messages
if "messages" not in st.session_state:
    st.session_state.messages = []

# Main interface
st.title("🤖 ChatGPT Clone")
st.caption(f"Powered by {model}")

# Display messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Ask me anything..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # System prompt is injected per request, never stored in history.
        messages = [{"role": "system", "content": system_prompt}] + st.session_state.messages

        try:
            for response in client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=True,
            ):
                if response.choices[0].delta.content:
                    full_response += response.choices[0].delta.content
                    message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})

        except Exception as e:
            # ROBUSTNESS FIX: drop the just-added user message so a failed
            # request doesn't leave a dangling, unanswered turn in history.
            st.session_state.messages.pop()
            st.error(f"❌ Error: {str(e)}")

# Footer
st.divider()
st.caption("Built with Streamlit and OpenAI API")

Deployment

Option 1: Streamlit Cloud (Free)

# 1. Push to GitHub
git init
git add .
git commit -m "ChatGPT clone"
# FIX: a fresh `git init` has no remote and no `main` branch yet, so the
# original `git push origin main` failed. Create both first.
git branch -M main
git remote add origin https://github.com/<your-username>/chatgpt-clone.git
git push -u origin main

# 2. Go to share.streamlit.io
# 3. Connect your GitHub repo
# 4. Add OPENAI_API_KEY in secrets
# 5. Deploy!

Option 2: Docker

# Dockerfile
FROM python:3.11-slim

WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt

# NOTE(review): `COPY . .` copies everything, including the .env file with
# the API key, into the image. Add .env to a .dockerignore so the secret is
# never baked into image layers (the key is passed at runtime with -e below).
COPY . .

EXPOSE 8501

# NOTE(review): consider adding --server.address=0.0.0.0 so the app is
# reachable from outside the container — verify Streamlit's default bind.
CMD ["streamlit", "run", "app.py", "--server.port=8501"]

# Build and run
docker build -t chatgpt-clone .
docker run -p 8501:8501 -e OPENAI_API_KEY=your-key chatgpt-clone

Enhancement Ideas