Stable Diffusion API Tutorial
Open-source image generation models with support for advanced features such as ControlNet and LoRA
- Open-source models: fully open for use
- Highly controllable: precise control via ControlNet
- LoRA fine-tuning: custom styles
- Multiple modes: txt2img / img2img
1. Basic Image Generation
Quick Start
import requests
import json

# Basic usage of the Stable Diffusion API
API_KEY = "YOUR_API_KEY"  # replace with your own API key

def generate_image(prompt: str, model: str = "stable-diffusion-xl"):
    """Generate an image with Stable Diffusion"""
    url = "https://api.n1n.ai/v1/images/generations"
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }
    data = {
        "model": model,                 # stable-diffusion-xl, stable-diffusion-v1-5
        "prompt": prompt,
        "negative_prompt": "low quality, blurry, distorted",
        "num_inference_steps": 30,      # number of denoising steps
        "guidance_scale": 7.5,          # prompt guidance strength (CFG scale)
        "width": 1024,
        "height": 1024,
        "seed": -1,                     # -1 means a random seed
        "num_images": 1
    }
    response = requests.post(url, headers=headers, json=data)
    if response.status_code == 200:
        result = response.json()
        return result["images"][0]["url"]
    else:
        raise Exception(f"Generation failed: {response.text}")
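# Example usage (a minimal sketch, not part of the API reference): assumes
# API_KEY above holds a valid key and the response schema matches the one
# used in generate_image(); the prompt and output filename are placeholders.
image_url = generate_image("a cozy wooden cabin in a snowy forest, golden hour")
print("Image URL:", image_url)
image_bytes = requests.get(image_url, timeout=60).content   # download the result
with open("output.png", "wb") as f:                          # save it locally
    f.write(image_bytes)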
# SDXL advanced parameters
def generate_sdxl_advanced(prompt: str, style_preset: str = None):
    """Advanced SDXL generation"""
    url = "https://api.n1n.ai/v1/images/generations"
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "stable-diffusion-xl",
        "prompt": prompt,
        "negative_prompt": "ugly, tiling, poorly drawn, out of frame",
        "num_inference_steps": 50,
        "guidance_scale": 8.0,
        "width": 1024,
        "height": 1024,
        "sampler": "DPM++ 2M Karras",         # sampler
        "scheduler": "karras",                # scheduler
        "style_preset": style_preset,         # anime, photographic, digital-art
        "clip_guidance_preset": "FAST_BLUE",
        "refiner": True                       # enable the refiner model
    }
    response = requests.post(url, headers=headers, json=data)
    return response.json()

2. Available Models
- SDXL 1.0: latest high-quality model, native resolution 1024x1024
- SD 1.5: classic, stable version, native resolution 512x512
- SD 2.1: improved version, native resolution 768x768
- SDXL Turbo: fast generation, native resolution 1024x1024
- SD Inpaint: dedicated inpainting model, native resolution 512x512
- SDXL Refiner: detail enhancement, native resolution 1024x1024
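Requests tend to give the best results when the output size matches the model's native resolution listed above. The helper below is a minimal sketch of that mapping; only stable-diffusion-xl and stable-diffusion-v1-5 appear in the earlier examples, so the other model identifiers are assumptions and may need to be adjusted to the IDs your account actually exposes.

# Native resolution per model, restating the list above.
# NOTE: identifiers other than "stable-diffusion-xl" and "stable-diffusion-v1-5"
# are assumed names, not confirmed API model IDs.
NATIVE_RESOLUTION = {
    "stable-diffusion-xl": (1024, 1024),
    "stable-diffusion-v1-5": (512, 512),
    "stable-diffusion-v2-1": (768, 768),          # SD 2.1 (assumed ID)
    "stable-diffusion-xl-turbo": (1024, 1024),    # SDXL Turbo (assumed ID)
    "stable-diffusion-xl-inpaint": (512, 512),    # inpainting model (ID from the inpaint example below)
    "stable-diffusion-xl-refiner": (1024, 1024),  # SDXL Refiner (assumed ID)
}

def default_size(model: str) -> tuple:
    """Return a (width, height) pair matching the model's native resolution."""
    return NATIVE_RESOLUTION.get(model, (1024, 1024))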
3. Precise Control with ControlNet
Advanced control features
# ControlNet-guided generation
# Shared request headers for the examples below (reuses API_KEY from the Quick Start)
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}

def generate_with_controlnet(image_url: str, prompt: str, control_type: str):
    """Use ControlNet for precise control over image generation"""
    url = "https://api.n1n.ai/v1/images/controlnet"
    data = {
        "model": "stable-diffusion-xl",
        "prompt": prompt,
        "control_image": image_url,      # control/reference image
        "control_type": control_type,    # canny, depth, openpose, scribble
        "control_strength": 1.0,         # control strength
        "num_inference_steps": 30,
        "guidance_scale": 7.5
    }
    response = requests.post(url, headers=headers, json=data)
    return response.json()
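# Example usage (a sketch): generate a pose-matched image from a reference photo.
# The reference URL is a placeholder; the control_type values come from the
# comment above (canny, depth, openpose, scribble).
pose_result = generate_with_controlnet(
    image_url="https://example.com/reference-pose.jpg",
    prompt="an astronaut dancing on the moon, detailed spacesuit",
    control_type="openpose"   # follow the pose detected in the reference image
)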
# Image-to-image (img2img)
def image_to_image(source_image_url: str, prompt: str, strength: float = 0.75):
    """Generate a new image based on an existing one"""
    url = "https://api.n1n.ai/v1/images/img2img"
    data = {
        "model": "stable-diffusion-xl",
        "init_image": source_image_url,
        "prompt": prompt,
        "strength": strength,           # 0-1; higher values change the image more
        "num_inference_steps": 50,
        "guidance_scale": 7.5
    }
    response = requests.post(url, headers=headers, json=data)
    return response.json()
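# Example usage (a sketch; the source URL is a placeholder): the strength
# parameter decides how far the output drifts from the source image.
subtle = image_to_image("https://example.com/photo.jpg",
                        "same scene, watercolor painting", strength=0.35)  # stays close to the source
strong = image_to_image("https://example.com/photo.jpg",
                        "same scene, watercolor painting", strength=0.85)  # mostly re-imagined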
# Inpainting (redraw a local region)
def inpaint(image_url: str, mask_url: str, prompt: str):
    """Regenerate a masked region of an image"""
    url = "https://api.n1n.ai/v1/images/inpaint"
    data = {
        "model": "stable-diffusion-xl-inpaint",
        "image": image_url,
        "mask": mask_url,
        "prompt": prompt,
        "num_inference_steps": 50,
        "guidance_scale": 8.0,
        "strength": 0.99
    }
    response = requests.post(url, headers=headers, json=data)
    return response.json()

4. LoRA Fine-Tuning
Style customization
# Working with LoRA models
class SDLoRAManager:
    """Manage and use LoRA models"""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.n1n.ai/v1"

    def generate_with_lora(self, prompt: str, lora_models: list):
        """Generate with one or more LoRA models applied.

        lora_models is a list of dicts, e.g.
        [{"name": "realistic_vision", "weight": 0.8},
         {"name": "detail_enhancer", "weight": 0.5}]
        where weight controls how strongly each LoRA is applied.
        """
        data = {
            "model": "stable-diffusion-xl",
            "prompt": prompt,
            "loras": lora_models,
            "num_inference_steps": 30,
            "guidance_scale": 7.5,
            "width": 1024,
            "height": 1024
        }
        response = requests.post(
            f"{self.base_url}/images/generate-lora",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json=data
        )
        return response.json()

    def list_available_loras(self):
        """List the available LoRA models"""
        response = requests.get(
            f"{self.base_url}/models/loras",
            headers={"Authorization": f"Bearer {self.api_key}"}
        )
        return response.json()

    def train_custom_lora(self, dataset: list, config: dict = None):
        """Train a custom LoRA (requires elevated permissions)"""
        # Default training hyper-parameters, overridable via `config`
        training_config = {
            "learning_rate": 1e-4,
            "num_epochs": 100,
            "batch_size": 1,
            "gradient_accumulation_steps": 4,
            "lora_rank": 32
        }
        training_config.update(config or {})
        data = {
            "base_model": "stable-diffusion-xl",
            "dataset": dataset,   # training images and captions
            "training_config": training_config
        }
        response = requests.post(
            f"{self.base_url}/models/train-lora",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json=data
        )
        return response.json()
# Batch-generate the same prompt in different styles
def batch_generate_styles(prompt: str):
    manager = SDLoRAManager(API_KEY)
    styles = [
        {"name": "anime", "lora": "anime_style", "weight": 0.9},
        {"name": "realistic", "lora": "photorealistic", "weight": 0.8},
        {"name": "fantasy", "lora": "fantasy_art", "weight": 0.7},
        {"name": "cyberpunk", "lora": "cyberpunk_style", "weight": 0.85}
    ]
    results = []
    for style in styles:
        result = manager.generate_with_lora(
            prompt=f"{prompt}, {style['name']} style",
            lora_models=[{"name": style['lora'], "weight": style['weight']}]
        )
        results.append(result)
    return results

5. Best Practices
🎯 Parameter Tuning
- ✅ Steps: 20-50 steps balance quality and speed
- ✅ CFG Scale: 7-10 suits most scenes
- ✅ Use negative prompts to improve quality
- ✅ Fix the seed for reproducibility (see the example below)
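For instance, pinning the seed and keeping an explicit negative prompt makes a run repeatable, so you can change one parameter at a time and compare outputs. The sketch below reuses the request format from the Quick Start example; the prompt and the seed value are arbitrary placeholders.

# Reproducible generation: same seed + same parameters -> same image.
headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
data = {
    "model": "stable-diffusion-xl",
    "prompt": "portrait of an elderly fisherman, dramatic lighting",
    "negative_prompt": "low quality, blurry, distorted",
    "num_inference_steps": 30,    # 20-50 balances quality and speed
    "guidance_scale": 7.5,        # CFG 7-10 suits most scenes
    "seed": 123456789,            # any fixed value; -1 would randomize
    "width": 1024,
    "height": 1024
}
response = requests.post("https://api.n1n.ai/v1/images/generations",
                         headers=headers, json=data)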
💡 Prompt Tips
- ✅ Use weight syntax: (word:1.2)
- ✅ Add quality tags: masterpiece, best quality
- ✅ Specify an art style or artist
- ✅ Use BREAK to separate concepts (see the example after this list)
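Putting these tips together, a weighted prompt might look like the sketch below. The (word:1.2) emphasis syntax and the BREAK separator are conventions from Stable Diffusion prompt parsing; whether a given endpoint honors them is worth verifying, and the subject matter is just an example.

# Quality tags first, weighted subject terms next, BREAK before the scene.
prompt = (
    "masterpiece, best quality, ultra detailed, "
    "(portrait of a samurai:1.3), (intricate armor:1.2), in the style of ukiyo-e "
    "BREAK "
    "misty bamboo forest at dawn, volumetric light"
)
result_url = generate_image(prompt)   # reuses the helper from the Quick Start section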