A Complete Python SDK Tutorial

Learn from scratch how to call large language model APIs with the Python SDK and build intelligent applications. This tutorial covers everything from environment setup to advanced features.

Quick Start

1. Install the SDK

# Install with pip
pip install openai

# Or with conda
conda install -c conda-forge openai

# Install a specific version
pip install openai==1.6.0
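
A quick way to confirm the installation is to print the SDK version from Python (the exact version string will vary with what you installed):

# Sanity check: print the installed SDK version
import openai
print(openai.__version__)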

2. Basic Configuration

import os
from openai import OpenAI

# Method 1: environment variable
os.environ["OPENAI_API_KEY"] = "your-api-key"
client = OpenAI()

# Method 2: pass the key directly
client = OpenAI(
    api_key="your-api-key",
    base_url="https://api.openai.com/v1"  # optional: custom endpoint
)

# Method 3: read from a config file
import json
with open('config.json') as f:
    config = json.load(f)
client = OpenAI(api_key=config['api_key'])
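
If you prefer to keep the key out of your source tree, another common pattern is to load it from a .env file. This optional sketch assumes the python-dotenv package is installed (pip install python-dotenv):

# Method 4 (optional): load the key from a .env file via python-dotenv
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()      # reads .env and populates os.environ, including OPENAI_API_KEY
client = OpenAI()  # picks up OPENAI_API_KEY from the environment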

Basic Features

Chat Completions

# Simple conversation
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a friendly assistant"},
        {"role": "user", "content": "Give me an introduction to Python"}
    ]
    ]
)

print(response.choices[0].message.content)

# Streaming output
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write an article"}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
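
The create call also accepts sampling parameters. Below is a minimal sketch of the most commonly tuned ones; the values are illustrative, not recommendations:

# Commonly tuned generation parameters (illustrative values)
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a short poem about the sea"}],
    temperature=0.7,   # randomness: lower is more deterministic
    max_tokens=200,    # upper bound on generated tokens
    top_p=1.0,         # nucleus sampling cutoff
    n=1                # number of completions to return
)
print(response.choices[0].message.content)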

Advanced Features

Function Calling

# Define the function schema
functions = [
    {
        "name": "get_weather",
        "description": "获取天气信息",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "城市名称"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"]
                }
            },
            "required": ["location"]
        }
    }
]

# Call the model with the function definitions
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather like in Beijing?"}],
    functions=functions,
    function_call="auto"  # let the model decide whether to call the function
)

# Handle the function call
if response.choices[0].message.function_call:
    function_name = response.choices[0].message.function_call.name
    function_args = json.loads(
        response.choices[0].message.function_call.arguments
    )
    
    # Execute the actual function
    if function_name == "get_weather":
        weather_data = get_weather(**function_args)
        
        # Return the result to the model
        second_response = client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "user", "content": "北京天气怎么样?"},
                response.choices[0].message,
                {
                    "role": "function",
                    "name": function_name,
                    "content": str(weather_data)
                }
            ]
        )
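
Note that functions / function_call is the legacy form of this feature; newer SDK versions expose the same capability through the tools and tool_choice parameters. A minimal sketch of the equivalent tools-based call, reusing the schema and the assumed get_weather helper from above:

# Equivalent call using the newer tools / tool_choice parameters
tools = [{"type": "function", "function": functions[0]}]

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather like in Beijing?"}],
    tools=tools,
    tool_choice="auto"
)

tool_calls = response.choices[0].message.tool_calls
if tool_calls:
    call = tool_calls[0]
    args = json.loads(call.function.arguments)
    weather_data = get_weather(**args)  # assumed helper, same as above

    second_response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "user", "content": "What's the weather like in Beijing?"},
            response.choices[0].message,
            {
                "role": "tool",
                "tool_call_id": call.id,
                "content": str(weather_data)
            }
        ]
    )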

Error Handling

import time
from openai import OpenAI, RateLimitError, APIError

def call_api_with_retry(prompt, max_retries=3):
    """带重试机制的API调用"""
    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}],
                timeout=30  # request timeout in seconds
            )
            return response
            
        except RateLimitError as e:
            # Rate limited; wait and retry
            wait_time = 2 ** attempt  # exponential backoff
            print(f"Rate limited, retrying in {wait_time} seconds...")
            time.sleep(wait_time)
            
        except APIError as e:
            # API error
            print(f"API error: {e}")
            if attempt == max_retries - 1:
                raise
                
        except Exception as e:
            # Any other error
            print(f"Unexpected error: {e}")
            raise
    
    return None
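
The SDK also retries some transient failures on its own; you can raise that built-in retry count with the max_retries client option, which complements the manual loop above. A short sketch:

# Built-in retries: the client retries some transient errors automatically;
# max_retries raises the default retry count
client = OpenAI(api_key="your-api-key", max_retries=5)

# Using the manual wrapper defined above
result = call_api_with_retry("Explain decorators in Python")
if result:
    print(result.choices[0].message.content)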

Performance Optimization

Batch Processing

import asyncio
from openai import AsyncOpenAI

# Async client
async_client = AsyncOpenAI(api_key="your-key")

async def process_batch(prompts):
    """批量异步处理"""
    tasks = []
    for prompt in prompts:
        task = async_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}]
        )
        tasks.append(task)
    
    # Run concurrently
    responses = await asyncio.gather(*tasks)
    return responses

# Usage example
prompts = ["Question 1", "Question 2", "Question 3"]
results = asyncio.run(process_batch(prompts))
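
Firing every request at once can itself trigger rate limits. A common mitigation, sketched here with asyncio.Semaphore (the limit of 5 is an arbitrary example), caps how many calls run concurrently:

# Cap concurrency with a semaphore to stay under rate limits
async def process_batch_limited(prompts, max_concurrency=5):
    semaphore = asyncio.Semaphore(max_concurrency)

    async def worker(prompt):
        async with semaphore:
            return await async_client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}]
            )

    return await asyncio.gather(*(worker(p) for p in prompts))

results = asyncio.run(process_batch_limited(prompts))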

A Practical Utility Class

class LLMHelper:
    """LLM辅助工具类"""
    
    def __init__(self, api_key, model="gpt-4"):
        self.client = OpenAI(api_key=api_key)
        self.model = model
        self.conversation_history = []
    
    def chat(self, message, remember=True):
        """对话方法"""
        if remember:
            self.conversation_history.append(
                {"role": "user", "content": message}
            )
        
        messages = self.conversation_history if remember else [
            {"role": "user", "content": message}
        ]
        
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages
        )
        
        assistant_message = response.choices[0].message.content
        
        if remember:
            self.conversation_history.append(
                {"role": "assistant", "content": assistant_message}
            )
        
        return assistant_message
    
    def summarize(self, text, max_length=100):
        """Summarize text."""
        prompt = f"Summarize the following text in at most {max_length} characters:\n{text}"
        return self.chat(prompt, remember=False)
    
    def translate(self, text, target_language="English"):
        """Translate text."""
        prompt = f"Translate the following text into {target_language}:\n{text}"
        return self.chat(prompt, remember=False)
    
    def clear_history(self):
        """Clear the conversation history."""
        self.conversation_history = []

# Usage example
helper = LLMHelper(api_key="your-key")
response = helper.chat("Hello")
summary = helper.summarize("A very long piece of text...")
translation = helper.translate("你好", "English")

Start Your Python Development Journey

Master the Python SDK and quickly build powerful AI applications.
