Real-World Examples
Learn from practical examples and use cases. All examples work with any supported provider.
Provider Flexibility
Core Feature
Switch between providers with identical code. Perfect for development vs. production environments.
from abstractcore import create_llm

# Development (free, local) with deterministic outputs
llm_dev = create_llm("ollama", model="llama3.2:3b", seed=42, temperature=0.0)

# Production (high quality, cloud) with the same seed
llm_prod = create_llm("openai", model="gpt-4o-mini", seed=42, temperature=0.0)

# Same interface, deterministic results
def ask_question(llm, question):
    return llm.generate(question)
# Reproducible outputs across providers
dev_answer = ask_question(llm_dev, "What is Python?")
prod_answer = ask_question(llm_prod, "What is Python?")
# Both aim to give consistent, reproducible answers (seed support is best-effort on some providers)
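The same pattern extends to configuration-driven selection, so one code path serves both environments. A minimal sketch, assuming you choose the provider via environment variables (the variable names are illustrative, not part of AbstractCore):

import os
from abstractcore import create_llm

# Hypothetical variable names - pick whatever fits your deployment
provider = os.getenv("APP_LLM_PROVIDER", "ollama")
model = os.getenv("APP_LLM_MODEL", "llama3.2:3b")

llm = create_llm(provider, model=model, seed=42, temperature=0.0)
print(ask_question(llm, "What is Python?").content)  # ask_question defined above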
RAG with Embeddings
Advanced
Build retrieval-augmented generation (RAG) systems with built-in embedding support.
from abstractcore.embeddings import EmbeddingManager
from abstractcore import create_llm
# Initialize components
embedder = EmbeddingManager()
llm = create_llm("openai", model="gpt-4o-mini")
# Documents to search
documents = [
"Python is great for data science and machine learning.",
"JavaScript powers modern web applications.",
"Rust ensures memory safety without garbage collection."
]
# Create embeddings
doc_embeddings = embedder.embed_batch(documents)
# User query
query = "Tell me about web development"
query_embedding = embedder.embed(query)
# Find most similar document
similarities = [
    embedder.compute_similarity(query_embedding, doc_emb)
    for doc_emb in doc_embeddings
]
best_doc_idx = similarities.index(max(similarities))
context = documents[best_doc_idx]
# Generate response with context
response = llm.generate(
f"Context: {context}\n\nQuestion: {query}\n\nAnswer:"
)
print(response.content)
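With more documents you typically want several supporting passages rather than a single best match. A small extension of the example above, reusing the same embed, embed_batch, and compute_similarity calls (the top_k_documents helper is illustrative):

def top_k_documents(query, documents, embedder, k=2):
    """Return the k documents most similar to the query."""
    query_emb = embedder.embed(query)
    doc_embs = embedder.embed_batch(documents)
    scored = sorted(
        ((embedder.compute_similarity(query_emb, emb), doc)
         for emb, doc in zip(doc_embs, documents)),
        key=lambda pair: pair[0],
        reverse=True,
    )
    return [doc for _, doc in scored[:k]]

context = "\n".join(top_k_documents(query, documents, embedder, k=2))
response = llm.generate(f"Context:\n{context}\n\nQuestion: {query}\n\nAnswer:")
print(response.content)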
Advanced Tool Calling
Tools
Create sophisticated tool-enabled applications that work across all providers.
from abstractcore import create_llm, tool
import requests
@tool
def search_web(query: str) -> str:
"""Search the web for information."""
# Simplified example - use actual search API
return f"Search results for '{query}': [relevant information]"
@tool
def save_to_file(filename: str, content: str) -> str:
"""Save content to a file."""
with open(filename, 'w') as f:
f.write(content)
return f"Saved content to {filename}"
@tool
def get_weather(city: str) -> str:
"""Get current weather for a city."""
# Simplified example - use actual weather API
return f"Weather in {city}: 72°F, sunny"
# Create LLM with tools
llm = create_llm("anthropic", model="claude-3-5-haiku-latest")
# Complex multi-tool request
response = llm.generate(
"""
I need you to:
1. Search for information about Python web frameworks
2. Get the weather in San Francisco
3. Save a summary to a file called 'research.txt'
Please complete these tasks in order.
""",
tools=[search_web, get_weather, save_to_file]
)
print(response.content)
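Because tools are plain decorated Python functions, the same list can be handed to a different backend unchanged. A sketch assuming a local Ollama model that supports tool calling:

# Same tools, different provider (assumes the local model supports tool calling)
local_llm = create_llm("ollama", model="llama3.2:3b")

response = local_llm.generate(
    "Get the weather in San Francisco and save it to 'weather.txt'.",
    tools=[get_weather, save_to_file]
)
print(response.content)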
Complex Structured Output
Type Safe
Extract complex structured data with automatic validation and retry.
from pydantic import BaseModel, Field
from typing import List, Optional
from abstractcore import create_llm
class Address(BaseModel):
    street: str
    city: str
    state: str
    zip_code: str
class Person(BaseModel):
    name: str
    age: int = Field(gt=0, le=150)
    email: Optional[str] = None
    address: Address
    skills: List[str]
class Company(BaseModel):
    name: str
    employees: List[Person]
    founded_year: int
    headquarters: Address
llm = create_llm("openai", model="gpt-4o-mini")
# Extract complex nested data
company_data = """
TechCorp was founded in 2010 and is headquartered at
123 Tech Street, San Francisco, CA 94105.
Employees:
- John Doe, 30, john@techcorp.com, lives at 456 Oak Ave,
Palo Alto, CA 94301. Skills: Python, JavaScript, React.
- Jane Smith, 28, jane@techcorp.com, lives at 789 Pine St,
Mountain View, CA 94041. Skills: Go, Kubernetes, Docker.
"""
company = llm.generate(
f"Extract company information: {company_data}",
response_model=Company
)
print(f"Company: {company.name}")
print(f"Founded: {company.founded_year}")
print(f"Employees: {len(company.employees)}")
for emp in company.employees:
print(f" - {emp.name}, {emp.age}, {emp.address.city}")
print(f" Skills: {', '.join(emp.skills)}")
Real-Time Streaming
Streaming
Build interactive applications with real-time streaming responses.
from abstractcore import create_llm, tool
import time
@tool
def get_system_time() -> str:
"""Get the current system time."""
return time.strftime("%Y-%m-%d %H:%M:%S")
def interactive_chat():
llm = create_llm("openai", model="gpt-4o-mini")
print("š¤ AI Assistant (type 'quit' to exit)")
print("=" * 40)
while True:
user_input = input("\nš¤ You: ")
if user_input.lower() == 'quit':
break
print("š¤ AI: ", end="", flush=True)
# Stream response with tool support
for chunk in llm.generate(
user_input,
tools=[get_system_time],
stream=True
):
print(chunk.content, end="", flush=True)
# Handle tool calls during streaming
if chunk.tool_calls:
print("\nš ļø [Tool executed]", flush=True)
print("š¤ AI: ", end="", flush=True)
print() # New line after response
# Run the interactive chat
if __name__ == "__main__":
    interactive_chat()
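Streaming also works outside an interactive loop. A minimal sketch that prints chunks as they arrive while collecting them into the full response text:

from abstractcore import create_llm

llm = create_llm("openai", model="gpt-4o-mini")

parts = []
for chunk in llm.generate("Explain Python generators in two sentences.", stream=True):
    print(chunk.content, end="", flush=True)  # show tokens as they arrive
    parts.append(chunk.content)

full_text = "".join(parts)
print(f"\n\nReceived {len(full_text)} characters in total.")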
Image Analysis
Media
Analyze images across providers with automatic optimization and vision fallback.
from abstractcore import create_llm
# Works with any vision-capable provider
llm = create_llm("openai", model="gpt-4o")
# Single image analysis
response = llm.generate(
"What objects do you see in this image? Describe the scene.",
media=["photo.jpg"]
)
print(response.content)
# Multiple images comparison
response = llm.generate(
"Compare these three images and identify common themes",
media=["image1.jpg", "image2.jpg", "image3.jpg"]
)
print(response.content)
# Works with vision fallback for text-only models
text_llm = create_llm("lmstudio", model="qwen/qwen3-next-80b")
response = text_llm.generate(
"Analyze this image",
media=["complex_scene.jpg"]
)
# Vision model analyzes ā text model processes description
print(response.content)
Document Processing
Media
Process PDFs, Office documents, and data files with the same simple API.
from abstractcore import create_llm
llm = create_llm("anthropic", model="claude-3.5-sonnet")
# PDF analysis
response = llm.generate(
"Summarize the key findings in this report",
media=["annual_report.pdf"]
)
print(response.content)
# Office documents
response = llm.generate(
"Extract action items from this presentation",
media=["meeting_slides.pptx"]
)
print(response.content)
# Excel data analysis
response = llm.generate(
"What trends do you see in this sales data?",
media=["sales_data.xlsx"]
)
print(response.content)
# CSV analysis
response = llm.generate(
"Calculate the average and identify outliers",
media=["metrics.csv"]
)
print(response.content)
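Media inputs can also be paired with the structured-output feature shown earlier. A sketch, assuming media and response_model can be combined in a single call (the SalesSummary model is illustrative):

from pydantic import BaseModel
from typing import List

class SalesSummary(BaseModel):
    total_revenue: float
    best_month: str
    outlier_months: List[str]

# Assumption: media and response_model may be passed together
summary = llm.generate(
    "Summarize this sales data as structured output.",
    media=["sales_data.xlsx"],
    response_model=SalesSummary
)
print(summary.best_month)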
Multi-Media Analysis
Media
Combine images, documents, and data files in a single analysis request.
from abstractcore import create_llm
llm = create_llm("openai", model="gpt-4o")
# Mixed media: chart + spreadsheet + report
response = llm.generate(
"""
Compare the chart visualization with the raw data
in the spreadsheet and verify it matches the
findings in the PDF report. Identify any discrepancies.
""",
media=["quarterly_chart.png", "data.xlsx", "report.pdf"]
)
print(response.content)
# Real-world use case: Product analysis
response = llm.generate(
"""
Analyze this product launch:
- Review the design mockups
- Check the market data
- Summarize the strategy document
Provide recommendations for launch timing.
""",
media=[
"product_mockup_1.jpg",
"product_mockup_2.jpg",
"market_research.csv",
"launch_strategy.docx"
]
)
print(response.content)
Session Management
Memory
Build applications with persistent conversation memory and analytics.
from abstractcore import create_llm, BasicSession
def create_tutoring_session():
llm = create_llm("anthropic", model="claude-3-5-haiku-latest")
# Create session with system prompt
session = BasicSession(
provider=llm,
system_prompt="""
You are a helpful Python programming tutor.
Remember the student's progress and adapt your
teaching style to their level.
"""
)
# Simulate a tutoring conversation
topics = [
"I'm new to Python. Can you explain variables?",
"How do I create a list?",
"What's the difference between a list and a tuple?",
"Can you give me a practice exercise?",
"I'm confused about the exercise. Can you help?"
]
for i, topic in enumerate(topics, 1):
print(f"\n--- Session {i} ---")
print(f"Student: {topic}")
response = session.generate(topic)
print(f"Tutor: {response.content[:100]}...")
# Add metadata to track progress
session.add_message(
'user',
topic,
lesson_number=i,
topic_category="python_basics"
)
# Save session with analytics
session.save(
'tutoring_session.json',
summary=True, # Generate conversation summary
assessment=True, # Assess learning progress
facts=True # Extract key facts learned
)
print("\nš Session saved with analytics!")
# Load and continue session later
loaded_session = BasicSession.load(
'tutoring_session.json',
provider=llm
)
# Continue conversation
followup = loaded_session.generate(
"Can you summarize what we've covered so far?"
)
print(f"\nSummary: {followup.content}")
if __name__ == "__main__":
    create_tutoring_session()
CLI Media Handling
Use the @filename syntax in the CLI to attach files instantly.
# Image analysis
python -m abstractcore.utils.cli --prompt "What's in @photo.jpg"
# PDF document
python -m abstractcore.utils.cli --prompt "Summarize @report.pdf"
# Office documents
python -m abstractcore.utils.cli --prompt "Extract key points from @slides.pptx"
python -m abstractcore.utils.cli --prompt "What data is in @spreadsheet.xlsx"
# Data files
python -m abstractcore.utils.cli --prompt "Analyze trends in @sales_data.csv"
# Multiple files
python -m abstractcore.utils.cli --prompt "Compare @chart.png and @data.csv"
# Mixed media analysis
python -m abstractcore.utils.cli --prompt "Verify @chart.png matches @data.xlsx and @report.pdf"
More Examples
Find more examples in the GitHub repository
Related Documentation
Getting Started: Quick setup guide
Centralized Configuration: Global defaults and settings
Media Handling: Universal file attachment system
Vision Capabilities: Image analysis with fallback
Tool Calling: Universal tool system
API Reference: Complete Python API
Built-in Tools
New
Use AbstractCore's comprehensive built-in tools for web scraping, file operations, and system tasks.
from abstractcore import create_llm
from abstractcore.tools.common_tools import fetch_url, search_files, read_file
# Use built-in tools with any provider
llm = create_llm("anthropic", model="claude-3-5-haiku-latest")
# Web content analysis
response = llm.generate(
"Analyze this API and summarize the key endpoints",
tools=[fetch_url]
)
# File system operations
response = llm.generate(
"Find all Python functions in this project and list their purposes",
tools=[search_files, read_file]
)
# Built-in tools include:
# - fetch_url: Intelligent web content fetching
# - search_files: Regex search in files
# - read_file: Read file contents
# - write_file: Write to files
# - edit_file: Pattern-based file editing
# - web_search: DuckDuckGo search
# - execute_command: Safe shell execution
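The built-in tools compose just like the custom @tool functions above. A short sketch combining web search with file output, assuming web_search and write_file are importable from the same common_tools module:

from abstractcore import create_llm
from abstractcore.tools.common_tools import web_search, write_file

llm = create_llm("anthropic", model="claude-3-5-haiku-latest")

# Research a topic and persist a summary using only built-in tools
response = llm.generate(
    "Search for recent articles about Python packaging and save a short summary to 'packaging_notes.md'.",
    tools=[web_search, write_file]
)
print(response.content)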