What is LangChain?
LangChain is a framework for building applications powered by large language models (LLMs). It provides building blocks such as chains, agents, memory, and retrieval, making it easy to compose complex AI workflows.
🚀 Quick Start
```bash
pip install langchain openai
```
Basic LLM Call
```python
import os

from langchain.llms import OpenAI

os.environ["OPENAI_API_KEY"] = "your-key-here"

llm = OpenAI(temperature=0.7)
response = llm("Explain quantum computing in simple terms")
print(response)
```
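If you need several completions in one call, the classic LLM interface also exposes `generate`, which takes a list of prompts. A minimal sketch, assuming the `llm` object from above (the prompt strings are just illustrations):

```python
# Batch several prompts in one call with the classic API
result = llm.generate([
    "Define entropy in one sentence",
    "Define enthalpy in one sentence",
])

# result.generations holds one list of candidate generations per prompt
for generations in result.generations:
    print(generations[0].text)
```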
🔗 Chains
Simple Chain
```python
from langchain import PromptTemplate, LLMChain

template = """
You are a helpful assistant.
Question: {question}
Answer:"""

prompt = PromptTemplate(template=template, input_variables=["question"])
chain = LLMChain(llm=llm, prompt=prompt)

response = chain.run("What is machine learning?")
print(response)
```
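Prompts are not limited to one variable. With several `input_variables`, pass the inputs as keyword arguments to `run`. A small sketch reusing `llm` (the template text is just an illustration):

```python
# Hypothetical two-variable prompt for illustration
multi_prompt = PromptTemplate(
    template="Explain {topic} to a {audience} in two sentences.",
    input_variables=["topic", "audience"],
)
multi_chain = LLMChain(llm=llm, prompt=multi_prompt)

# With multiple inputs, pass them as keyword arguments
response = multi_chain.run(topic="gradient descent", audience="beginner")
print(response)
```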
Sequential Chain
```python
from langchain.chains import SimpleSequentialChain

# Chain 1: generate a startup idea
idea_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        template="Generate a startup idea about {topic}",
        input_variables=["topic"]
    )
)

# Chain 2: write a pitch for that idea
pitch_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        template="Write a pitch for this startup: {idea}",
        input_variables=["idea"]
    )
)

# Combine them: idea_chain's output becomes pitch_chain's input
overall_chain = SimpleSequentialChain(
    chains=[idea_chain, pitch_chain]
)

result = overall_chain.run("AI for education")
print(result)
```
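`SimpleSequentialChain` assumes each step takes exactly one string in and produces one string out. When steps need several named inputs or outputs, the classic API also provides `SequentialChain`; a sketch under that assumption (the `tone` variable and the `output_key` names are illustrative):

```python
from langchain.chains import SequentialChain

idea_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        template="Generate a startup idea about {topic}",
        input_variables=["topic"],
    ),
    output_key="idea",  # name the output so later chains can reference it
)

pitch_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        template="Write a {tone} pitch for this startup: {idea}",
        input_variables=["tone", "idea"],
    ),
    output_key="pitch",
)

overall = SequentialChain(
    chains=[idea_chain, pitch_chain],
    input_variables=["topic", "tone"],
    output_variables=["idea", "pitch"],
)

result = overall({"topic": "AI for education", "tone": "playful"})
print(result["pitch"])
```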
💾 Memory
```python
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

memory = ConversationBufferMemory()
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

# Multi-turn conversation with memory
conversation.predict(input="Hi, I'm Alice")
conversation.predict(input="What's my name?")          # remembers "Alice"
conversation.predict(input="What did we talk about?")  # has the full context
```
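`ConversationBufferMemory` stores the entire history, which eventually overflows the model's context window. A common fix is to keep only the last few turns; a sketch using the windowed variant:

```python
from langchain.memory import ConversationBufferWindowMemory

# Keep only the last 3 exchanges in the prompt
window_memory = ConversationBufferWindowMemory(k=3)
conversation = ConversationChain(llm=llm, memory=window_memory)
```

Everything else works the same; only the history injected into the prompt is truncated.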
📚 Document Loaders & RAG
```python
from langchain.document_loaders import TextLoader, PyPDFLoader  # PyPDFLoader needs: pip install pypdf
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma  # requires: pip install chromadb
from langchain.chains import RetrievalQA

# Load documents (use PyPDFLoader for PDFs)
loader = TextLoader("data.txt")
documents = loader.load()

# Split into overlapping chunks so context isn't cut mid-thought
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200
)
texts = text_splitter.split_documents(documents)

# Embed the chunks and index them in a vector store
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(texts, embeddings)

# Create the QA chain: retrieve relevant chunks, then answer from them
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=vectorstore.as_retriever()
)

# Ask questions about your documents!
answer = qa_chain.run("What is the main topic?")
print(answer)
```
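For debugging RAG it helps to see which chunks an answer was grounded in. The chain can return its retrieved documents; a sketch assuming the `vectorstore` built above (the `k=4` retriever setting is just an example):

```python
qa_with_sources = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # stuff all retrieved chunks into a single prompt
    retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
    return_source_documents=True,
)

result = qa_with_sources({"query": "What is the main topic?"})
print(result["result"])
for doc in result["source_documents"]:
    print(doc.metadata)  # e.g. which file each supporting chunk came from
```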
🤖 Agents
```python
from langchain.agents import load_tools, initialize_agent, AgentType

# Load built-in tools (the wikipedia tool needs: pip install wikipedia)
tools = load_tools(["wikipedia", "llm-math"], llm=llm)

# Create a ReAct-style agent that reasons about which tool to call
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)

# The agent decides which tools to use, and in what order
agent.run("Who is the current president? What's their age squared?")
```
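Agents are not limited to the built-in tools: any Python function can be wrapped as a `Tool`. A sketch with a hypothetical `get_word_length` helper (both the function and the tool description are made up for illustration):

```python
from langchain.agents import Tool

def get_word_length(word: str) -> str:
    """Hypothetical helper: returns the length of a word as a string."""
    return str(len(word))

word_tool = Tool(
    name="WordLength",
    func=get_word_length,
    description="Returns the number of letters in a single word.",
)

custom_agent = initialize_agent(
    [word_tool],
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
custom_agent.run("How many letters are in the word 'langchain'?")
```

The `description` matters most here: the agent reads it to decide when the tool applies.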
🎯 Key Takeaways
- Chains connect multiple LLM calls into a pipeline
- Memory maintains conversation context across turns
- RAG grounds answers in your own documents
- Agents pick tools on the fly to solve multi-step tasks