From a0e5ee17158d99bd2962d4e25b753b302d2edf12 Mon Sep 17 00:00:00 2001
From: cloudroam <cloudroam>
Date: Mon, 16 Jun 2025 09:07:27 +0800
Subject: [PATCH] add: film and TV works processing
---
test2.py | 309 +++++++++++++++++++++++
test.py | 103 +++++++
test4.py | 49 +++
douban_mv.py | 113 ++++++++
douban.py | 14 +
douban_tv.py | 151 +++++++++++
6 files changed, 739 insertions(+), 0 deletions(-)
diff --git a/douban.py b/douban.py
new file mode 100644
index 0000000..3608f81
--- /dev/null
+++ b/douban.py
@@ -0,0 +1,14 @@
+from douban_tv import DoubanTVSpider
+from scrapy.crawler import CrawlerProcess
+
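+# Standalone runner: starts DoubanTVSpider with inline settings, no full Scrapy project needed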
+if __name__ == "__main__":
+ process = CrawlerProcess(settings={
+ "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/112.0.0.0 Safari/537.36",
+ "DOWNLOAD_DELAY": 2,
+ "CONCURRENT_REQUESTS": 1,
+ "ROBOTSTXT_OBEY": False,
+ "LOG_LEVEL": "INFO"
+ })
+ process.crawl(DoubanTVSpider)
+ process.start()
\ No newline at end of file
diff --git a/douban_mv.py b/douban_mv.py
new file mode 100644
index 0000000..b77fb45
--- /dev/null
+++ b/douban_mv.py
@@ -0,0 +1,113 @@
+# import scrapy
+# import json
+# import requests
+# from datetime import datetime
+# from urllib.parse import urljoin
+#
+#
+# class DoubanTVSpider(scrapy.Spider):
+# name = 'douban_tv'
+# allowed_domains = ['movie.douban.com']
+#
+# # Note: newer Scrapy versions deprecate start_requests() in favor of async start()
+# def start_requests(self):
+# headers = {
+# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/112.0.0.0 Safari/537.36'
+# }
+# # Crawl both movies and TV series
+# urls = [
+# "https://movie.douban.com/j/new_search_subjects?sort=U&tag=电视剧&start=0",
+# "https://movie.douban.com/j/new_search_subjects?sort=U&tag=电影&start=0"
+# ]
+#
+# for url in urls:
+# yield scrapy.Request(url, headers=headers, callback=self.parse)
+#
+# def parse(self, response):
+# try:
+# data = json.loads(response.text)
+# except json.JSONDecodeError:
+# self.logger.error("JSON解析失败: %s", response.url)
+# return
+#
+# for item in data.get('data', []):
+# detail_url = item.get('url')
+# if detail_url:
+# yield scrapy.Request(
+# detail_url,
+# callback=self.parse_detail,
+# meta={'item': item}
+# )
+#
+# # Pagination logic
+# current_start = int(response.url.split('start=')[-1])
+# next_start = current_start + 20
+# if next_start < 60:
+# next_url = response.url.replace(f'start={current_start}', f'start={next_start}')
+# yield response.follow(next_url, callback=self.parse)
+#
+# def parse_detail(self, response):
+# item = response.meta['item']
+#
+# # Basic fields from the listing item
+# title = item.get('title', '').strip()
+# rate = item.get('rate', '')
+# year = item.get('date', '')[:4] if item.get('date') else ''
+#
+# # Additional fields from the detail page
+# info = response.css('#info').get('')
+#
+# # Director
+# director = ','.join(response.css('#info .attrs a::text').getall()[:1])
+#
+# # Actors
+# actors = ','.join(response.css('#info .actor .attrs a::text').getall())
+#
+# # Genres
+# type_text = ','.join(response.css('#info span[property="v:genre"]::text').getall())
+#
+# # Production country/region
+# producer = response.css('#info span:contains("制片国家/地区:") + ::text').get('').strip()
+#
+# # Synopsis
+# synopsis = response.css('#link-report span[property="v:summary"]::text').get('').strip()
+#
+# # Cover image
+# cover_url = response.css('#mainpic img::attr(src)').get('')
+# cover_alt = response.css('#mainpic img::attr(alt)').get('')
+#
+# # Build the API payload
+# api_data = {
+# 'nameCn': title,
+# 'nameEn': '', # the Douban API does not provide an English title
+# 'type': '电视剧' if '电视剧' in response.url else '电影',
+# 'releaseYear': f"{year}-01-01T00:00:00" if year else None,
+# 'director': director,
+# 'producer': producer,
+# 'actors': actors,
+# 'keywords': type_text,
+# 'synopsis': synopsis,
+# 'coverUrl': cover_url,
+# 'coverAlt': cover_alt
+# }
+#
+# # POST the record to the backend API
+# try:
+# response = requests.post(
+# 'http://192.168.1.213:8080/flower/api/filmWorks/new',
+# json=api_data,
+# headers={'Content-Type': 'application/json'}
+# )
+# if response.status_code == 200:
+# self.logger.info(f"成功保存作品: {title}")
+# else:
+# self.logger.error(f"保存失败: {title}, 状态码: {response.status_code}")
+# except Exception as e:
+# self.logger.error(f"API请求失败: {str(e)}")
+#
+# # Optional spider settings
+# custom_settings = {
+# 'DOWNLOAD_DELAY': 2, # slow down to avoid being blocked
+# 'CONCURRENT_REQUESTS': 1, # limit concurrency
+# 'ROBOTSTXT_OBEY': False
+# }
\ No newline at end of file
diff --git a/douban_tv.py b/douban_tv.py
new file mode 100644
index 0000000..d3a8796
--- /dev/null
+++ b/douban_tv.py
@@ -0,0 +1,151 @@
+import scrapy
+import json
+import requests
+from datetime import datetime
+from urllib.parse import urljoin
+
+
+class DoubanTVSpider(scrapy.Spider):
+ name = 'douban_tv'
+ allowed_domains = ['movie.douban.com']
+
+ # Note: newer Scrapy versions deprecate start_requests() in favor of async start()
+ def start_requests(self):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/112.0.0.0 Safari/537.36',
+ 'Referer': 'https://movie.douban.com/tv/'
+ }
+ # Crawl both movies and TV series
+ # urls = [
+ # "https://movie.douban.com/j/new_search_subjects?sort=U&tag=电视剧&start=0",
+ # "https://movie.douban.com/j/new_search_subjects?sort=U&tag=电影&start=0"
+ # ]
+ # Endpoint 2: latest trending TV series
+ urls = [
+ "https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%83%AD%E9%97%A8&sort=recommend&start=0"
+ ]
+ for url in urls:
+ yield scrapy.Request(url, headers=headers, callback=self.parse)
+
+ def parse(self, response):
+ try:
+ data = json.loads(response.text)
+ self.logger.info(f"解析到的数据: {data}") # 添加日志
+ except json.JSONDecodeError:
+ self.logger.error("JSON解析失败: %s", response.url)
+ return
+
+ # Validate the response structure
+ items = data.get('subjects', []) # the new endpoint returns results under "subjects"
+ if not items:
+ self.logger.error("未找到subjects数据")
+ return
+
+ for item in items:
+ detail_url = item.get('url')
+ if detail_url:
+ yield scrapy.Request(
+ detail_url,
+ callback=self.parse_detail,
+ meta={'item': item}
+ )
+
+ # Pagination logic
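+ # start increases by 20 per page; stopping before start=60 crawls pages 0, 20 and 40 only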
+ current_start = int(response.url.split('start=')[-1])
+ next_start = current_start + 20
+ if next_start < 60:
+ next_url = response.url.replace(f'start={current_start}', f'start={next_start}')
+ yield response.follow(next_url, callback=self.parse)
+
+ def parse_detail(self, response):
+ item = response.meta['item']
+
+ # The url field (format: https://movie.douban.com/subject/25754848/) was extracted from the listing JSON and drove this request.
+
+ # Basic fields from the listing item
+ title = item.get('title', '').strip()
+ rate = item.get('rate', '')
+ year = item.get('date', '')[:4] if item.get('date') else ''
+
+ # Additional fields from the detail page
+ info = response.css('#info').get('')
+
+ # Directors
+ directors = response.css('#info span:contains("导演") + .attrs a::text').getall()
+ director = ','.join(directors)
+
+ # Actors
+ # (located with the adjacent-sibling selector)
+ actor = response.css('#info span:contains("主演") + .attrs a::text').getall()
+ actors = ','.join(actor)
+
+ # Genres
+ type_text = ','.join(response.css('#info span[property="v:genre"]::text').getall())
+
+ # Production country/region; default to '' so a missing node cannot crash .strip()
+ producer = response.xpath('//span[contains(text(), "制片国家/地区")]/following::text()[1]').get('').strip()
+
+ # Synopsis
+ # Extract both visible and hidden summary text
+ # (final approach: join the fragments and collapse whitespace)
+ synopsises = response.xpath('//span[@property="v:summary"]//text()').getall()
+ synopsis = ' '.join(''.join(synopsises).strip().split())
+
+ # Extract the premiere year
+ # release_date = response.css('span[property="v:initialReleaseDate"]::text').get()
+ # year = release_date.split('-')[0] if release_date else "未知"
+
+ # Extract the year (preferred approach)
+ year = response.css('.year::text').re_first(r'(\d+)') # match the digits directly
+
+ # Fallback if the regex fails
+ if not year:
+ year_text = response.css('.year::text').get()
+ year = year_text.strip('()') if year_text else "未知"
+
+ # Cover image
+ cover_url = response.css('#mainpic img::attr(src)').get('')
+ cover_alt = response.css('#mainpic img::attr(alt)').get('')
+
+ # Build the API payload
+ api_data = {
+ 'nameCn': title,
+ 'nameEn': '', # the Douban API does not provide an English title
+ 'type': 'tv',
+ 'releaseYear': year,
+ 'director': director,
+ 'producer': producer,
+ 'actors': actors,
+ 'keywords': type_text,
+ 'synopsis': synopsis,
+ 'coverUrl': cover_url,
+ 'coverAlt': cover_alt,
+ 'userType': 'official'
+
+ }
+
+ # POST the record to the backend API
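+ # Note: a blocking requests.post inside a Scrapy callback stalls the event loop;
+ # acceptable at this crawl rate, but an item pipeline would be the more idiomatic home for this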
+ try:
+ api_response = requests.post(
+ 'http://192.168.1.213:8080/flower/api/filmWorks/new',
+ json=api_data,
+ headers={'Content-Type': 'application/json'}
+ )
+ if api_response.status_code == 200:
+ self.logger.info(f"成功保存作品: {title}")
+ else:
+ self.logger.error(f"保存失败: {title}, 状态码: {api_response.status_code}")
+ except Exception as e:
+ self.logger.error(f"API请求失败: {str(e)}")
+
+ # Optional spider settings (Scrapy class attribute)
+ custom_settings = {
+ 'DOWNLOAD_DELAY': 2, # slow down to avoid being blocked
+ 'CONCURRENT_REQUESTS': 1, # limit concurrency
+ 'ROBOTSTXT_OBEY': False,
+ 'LOG_LEVEL': 'DEBUG' # verbose logging while debugging
+ }
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..2e04c07
--- /dev/null
+++ b/test.py
@@ -0,0 +1,103 @@
+# import requests
+# import json
+#
+#
+# def deepseek_generation(prompt, api_key):
+# """
+# Call the DeepSeek chat-completions endpoint.
+# Args:
+# prompt: the user's input text
+# api_key: the user's API key
+# Returns:
+# str: the generated text
+# """
+# url = "https://api.deepseek.com/v1/chat/completions"
+#
+# headers = {
+# "Content-Type": "application/json",
+# "Authorization": f"Bearer {api_key}",
+# "Accept": "application/json"
+# }
+#
+# # payload = {
+# # "model": "deepseek-reasoner",
+# # "messages": [
+# # {"role": "user", "content": prompt}
+# # ],
+# # "temperature": 0.3,
+# # "max_tokens": 2048
+# # }
+# payload = {
+# "model": "deepseek-chat", # 确认模型名称
+# "messages": [
+# {"role": "system", "content": "你是一个有帮助的助手"},
+# {"role": "user", "content": prompt}
+# ],
+# "temperature": 0.7, # 调整为官网默认值
+# "max_tokens": 2048,
+# "top_p": 1,
+# "frequency_penalty": 0
+# }
+# try:
+# response = requests.post(url, headers=headers, data=json.dumps(payload))
+# response.raise_for_status()
+#
+# result = response.json()
+# return result['choices'][0]['message']['content']
+# except requests.exceptions.RequestException as e:
+# print(f"API请求错误: {e}")
+# return None
+# except KeyError:
+# print("响应格式解析失败")
+# return None
+#
+#
+# if __name__ == "__main__":
+# # Usage example
+# api_key = "sk-055784b1d4904ce6a34b5733a091c130" # replace with your actual API key
+# user_input = input("请输入您的问题:")
+#
+# response = deepseek_generation(user_input, api_key)
+#
+# if response:
+# print("\nDeepSeek回答:")
+# print(response)
+# else:
+# print("生成内容时出现错误")
+
+
+import os
+from openai import OpenAI
+
+client = OpenAI(
+ api_key = os.environ.get("5017c24f-581f-48fb-abef-8ac4654e9018"),
+ base_url = "https://ark.cn-beijing.volces.com/api/v3",
+)
+
+# Non-streaming:
+print("----- standard request -----")
+completion = client.chat.completions.create(
+ model = "deepseek-r1-250120", # your model endpoint ID
+ messages = [
+ {"role": "system", "content": "你是人工智能助手"},
+ {"role": "user", "content": "帮我生成无忧渡电视剧中的实景拍摄地和行程安排?"},
+ ],
+)
+print(completion.choices[0].message.content)
+
+# Streaming:
+print("----- streaming request -----")
+stream = client.chat.completions.create(
+ model = "deepseek-r1-250120", # your model endpoint ID
+ messages = [
+ {"role": "system", "content": "你是人工智能助手"},
+ {"role": "user", "content": "帮我生成无忧渡电视剧中的实景拍摄地和行程安排?"},
+ ],
+ stream=True
+)
+
+for chunk in stream:
+ if not chunk.choices or not chunk.choices[0].delta.content:
+ continue
+ print(chunk.choices[0].delta.content, end="")
+print()
\ No newline at end of file
diff --git a/test2.py b/test2.py
new file mode 100644
index 0000000..cedc051
--- /dev/null
+++ b/test2.py
@@ -0,0 +1,309 @@
+import os
+import requests
+import json
+from datetime import datetime
+from openai import OpenAI
+import re
+
+
+# Initialize the OpenAI client
+client = OpenAI(
+ # Default endpoint; adjust for the region your service runs in
+ base_url="https://ark.cn-beijing.volces.com/api/v3/bots",
+ # NOTE: the key is hardcoded here; prefer reading it from the ARK_API_KEY env var
+ api_key="5017c24f-581f-48fb-abef-8ac4654e9018",
+)
+
+
+def generate_xiaohongshu_article(film_name, locations):
+ """生成小红书风格的文章"""
+ prompt = f"""请为电影《{film_name}》的拍摄地写一篇小红书风格的文章。要求:
+1. 标题要吸引人,包含电影名称和拍摄地
+2. 开头要吸引人,可以用电影中的经典台词或场景引入
+3. 详细介绍每个拍摄地点的特色和亮点
+4. 包含实用的参观建议和交通信息
+5. 使用emoji表情增加趣味性
+6. 最后要有总结和推荐
+7. 添加相关标签
+
+请以html格式返回,包含标题、正文和标签.生成内容字体要符合在小程序上合适的大小阅读,标题不要超过h3。"""
+
+ completion = client.chat.completions.create(
+ model="bot-20250512103613-6rwj8",
+ messages=[
+ {"role": "system", "content": "你是一个专业的小红书文案写手,擅长写吸引人的旅游攻略。"},
+ {"role": "user", "content": prompt},
+ ],
+ )
+
+ return completion.choices[0].message.content
+
+def generate_travel_route(film_name, locations):
+ """生成游玩路线"""
+ prompt = f"""请为电影《{film_name}》的拍摄地设计一条最优游玩路线。要求:
+1. 考虑各个景点之间的距离和交通方式
+2. 合理安排游览顺序,避免来回奔波
+3. 预估每个景点的游览时间
+4. 提供具体的交通建议
+5. 考虑用餐和休息时间
+6. 提供备选方案
+
+请以JSON格式返回,格式如下:
+{{
+ "route_name": "路线名称",
+ "total_time": "预计总时间",
+ "stops": [
+ {{
+ "location_name": "地点名称",
+ "visit_time": "建议游览时间",
+ "transportation": "前往下一个地点的交通方式",
+ "tips": "游览建议"
+ }}
+ ],
+ "alternative_routes": [
+ {{
+ "route_name": "备选路线名称",
+ "description": "路线说明"
+ }}
+ ]
+}}"""
+
+ completion = client.chat.completions.create(
+ model="bot-20250512103613-6rwj8",
+ messages=[
+ {"role": "system", "content": "你是一个专业的旅游路线规划师,擅长设计最优游览路线。"},
+ {"role": "user", "content": prompt},
+ ],
+ )
+
+ try:
+ response_text = completion.choices[0].message.content
+ cleaned_text = re.sub(r'```json|```', '', response_text)
+ cleaned_text = re.sub(r'^[^{[]*', '', cleaned_text)
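+ # leading non-JSON text is stripped; trailing text after the JSON would still break json.loads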
+ return json.loads(cleaned_text)
+ except Exception as e:
+ print(f"解析路线失败: {str(e)}\n原始响应: {response_text}")
+ return None
+
+def generate_xiaohongshu_route(film_name, locations):
+ """生成小红书风格的路线攻略"""
+ prompt = f"""请为电影《{film_name}》的拍摄地写一篇小红书风格的路线攻略。要求:
+1. 标题要吸引人,突出"最佳路线"或"完美行程"等关键词
+2. 开头要说明这条路线是如何规划的,为什么这样安排
+3. 详细介绍每个景点的游览时间和交通方式
+4. 使用emoji表情增加趣味性
+5. 提供实用的时间安排建议
+6. 包含备选路线方案
+7. 最后要有总结和注意事项
+8. 添加相关标签
+
+请以html格式返回,包含标题、正文和标签,生成内容字体要符合在小程序上合适的大小阅读,标题不要超过h3。"""
+
+ completion = client.chat.completions.create(
+ model="bot-20250512103613-6rwj8",
+ messages=[
+ {"role": "system", "content": "你是一个专业的小红书文案写手,擅长写吸引人的旅游路线攻略。"},
+ {"role": "user", "content": prompt},
+ ],
+ )
+
+ return completion.choices[0].message.content
+
+def get_film_works():
+ url = "http://192.168.1.213:8090/flower/api/filmWorks/all"
+ response = requests.get(url)
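+ # no timeout is set, so an unresponsive backend will block the run indefinitely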
+ if response.status_code == 200:
+ return response.json().get("data", [])
+ return []
+
+
+def get_location_info_from_model(film_name):
+ """使用模型获取多个拍摄地点信息"""
+ prompt = f"""请为电影《{film_name}》生成所有主要拍摄地点的详细信息。每部电影通常有多个拍摄地点,请尽可能详细地列出所有重要的拍摄地点。
+
+对于每个拍摄地点,请提供以下信息:
+1. 拍摄地点名称
+2. 详细地址
+3. 场景类型
+4. 经典画面描述
+5. 是否开放参观
+6. 参观提示
+7. 地标性建筑描述
+8. 交通指引说明
+9. 停车场信息
+10. 周边设施描述
+
+请以JSON数组格式返回,格式如下:
+[
+ {{
+ "locationName": "地点1名称",
+ "address": "地点1详细地址",
+ "gpsLat": "纬度坐标(精确到小数点后6位)",
+ "gpsLng": "经度坐标(精确到小数点后6位)",
+ "sceneType": "场景类型",
+ "classicScene": "经典画面描述",
+ "isOpenVisit": 0或1,
+ "visitInfo": "参观提示",
+ "landmarkDesc": "地标描述",
+ "transportGuide": "交通指引",
+ "parkingInfo": "停车信息",
+ "surroundingFacilities": "周边设施"
+ }},
+ {{
+ "locationName": "地点2名称",
+ ...
+ }}
+]
+
+请确保返回所有重要的拍摄地点,每个地点都要包含完整的信息。"""
+ # Non-streaming:
+ print("----- standard request -----")
+ completion = client.chat.completions.create(
+ model="bot-20250512103613-6rwj8", # bot-20250512103613-6rwj8 为您当前的智能体的ID,注意此处与Chat API存在差异。差异对比详见 SDK使用指南
+ # messages=[
+ # {"role": "system", "content": "你是DeepSeek,是一个 AI 人工智能助手"},
+ # {"role": "user", "content": "常见的十字花科植物有哪些?"},
+ # ],
+ messages=[
+ {"role": "system",
+ "content": "你是一个专业的影视拍摄地点信息专家,请根据电影名称生成所有重要拍摄地点的详细信息。"},
+ {"role": "user", "content": prompt},
+ ],
+ )
+ print(completion.choices[0].message.content)
+ if hasattr(completion, "references"):
+ print(completion.references)
+ if hasattr(completion.choices[0].message, "reasoning_content"):
+ print(completion.choices[0].message.reasoning_content) # for R1 models, print the reasoning content
+ # try:
+ # # try to parse the JSON returned by the model
+ # response_text = completion.choices[0].message.content
+ # location_info_list = json.loads(response_text)
+ # return location_info_list
+ # except:
+ # # if parsing fails, fall back to the default (None)
+ # return None
+ try:
+ response_text = completion.choices[0].message.content
+ # Clean up the response
+ cleaned_text = re.sub(r'```json|```', '', response_text) # strip Markdown code fences
+ cleaned_text = re.sub(r'^[^{[]*', '', cleaned_text) # drop leading non-JSON text
+
+ # Parse the JSON
+ location_info_list = json.loads(cleaned_text)
+ return location_info_list
+ except Exception as e:
+ print(f"解析失败: {str(e)}\n原始响应: {response_text}")
+ return None
+
+
+def create_film_location(film_id, film_name, location_info, article, route_article):
+ url = "http://192.168.1.213:8090/flower/api/filmLocation/new"
+
+ # 默认值设置
+ default_data = {
+ "filmId": film_id,
+ "locationName": f"{film_name}拍摄地",
+ "address": "待补充",
+ "gpsLat": 0.0,
+ "gpsLng": 0.0,
+ "startDate": datetime.now().strftime("%Y-%m-%d"),
+ "endDate": datetime.now().strftime("%Y-%m-%d"),
+ "sceneType": "外景",
+ "classicScene": "待补充",
+ "isOpenVisit": 0,
+ "visitInfo": json.dumps({"tips": "暂无参观信息"}),
+ "landmarkDesc": "待补充",
+ "transportGuide": "待补充",
+ "parkingInfo": "待补充",
+ "surroundingFacilities": "待补充",
+ "arEntry": "",
+ "status": 1,
+ "deleted": 0,
+ "createBy": "system",
+ "updateBy": "system",
+ "checkinCount": 0,
+ "visitorPhotos": json.dumps([]),
+ "xiaohongshuArticle": article, # 新增:小红书风格文章
+ "xiaohongshuRoute": route_article # 新增:小红书风格路线
+ }
+
+ # Override defaults with model-provided fields
+ if location_info:
+ for key, value in location_info.items():
+ if key in default_data:
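+ # NOTE: these snake_case keys never match the camelCase fields above
+ # (visitInfo / visitorPhotos), so every value currently takes the else branch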
+ if key in ['visit_info', 'visitor_photos']:
+ default_data[key] = json.dumps(value)
+ else:
+ default_data[key] = value
+
+ response = requests.post(url, json=default_data)
+ return response.json()
+
+def save_article_and_route(film_id, film_name, article, route, route_article):
+ """保存文章和路线到文件"""
+ # Create the output directory
+ output_dir = "output"
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ # Save the article
+ article_file = os.path.join(output_dir, f"{film_name}_article.md")
+ with open(article_file, "w", encoding="utf-8") as f:
+ f.write(article)
+
+ # Save the route (JSON format)
+ route_file = os.path.join(output_dir, f"{film_name}_route.json")
+ with open(route_file, "w", encoding="utf-8") as f:
+ json.dump(route, f, ensure_ascii=False, indent=2)
+
+ # Save the route (Xiaohongshu style)
+ route_article_file = os.path.join(output_dir, f"{film_name}_route_article.md")
+ with open(route_article_file, "w", encoding="utf-8") as f:
+ f.write(route_article)
+
+def main():
+ # Fetch all film works
+ film_works = get_film_works()
+
+ # Create filming locations for every film
+ for film in film_works:
+ film_name = film.get("nameCn")
+ film_id = film.get("id") # 新增:获取电影ID
+ if film_name:
+ print(f"正在处理电影: {film_name}")
+
+ # Fetch info for all filming locations
+ location_info_list = get_location_info_from_model(film_name)
+
+ if location_info_list:
+ # Generate the Xiaohongshu article
+ article = generate_xiaohongshu_article(film_name, location_info_list)
+ print(f"\n生成的文章:\n{article}")
+
+ # Generate the sightseeing route (JSON format)
+ route = generate_travel_route(film_name, location_info_list)
+ print(f"\n生成的路线:\n{json.dumps(route, ensure_ascii=False, indent=2)}")
+
+ # Generate the Xiaohongshu-style route guide
+ route_article = generate_xiaohongshu_route(film_name, location_info_list)
+ print(f"\n生成的路线文章:\n{route_article}")
+
+ # Create a record for each filming location
+ for location_info in location_info_list:
+ result = create_film_location(film_id, film_name, location_info, article, route_article)
+ print(f"创建拍摄地点 {location_info.get('locationName', '未知地点')} 结果: {result}")
+
+ # Save the article and routes
+ save_article_and_route(film_id, film_name, article, route, route_article)
+ else:
+ print(f"未能获取到电影 {film_name} 的拍摄地点信息")
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/test4.py b/test4.py
new file mode 100644
index 0000000..f47af81
--- /dev/null
+++ b/test4.py
@@ -0,0 +1,49 @@
+import os
+from openai import OpenAI
+
+# Make sure your API Key is stored in the ARK_API_KEY environment variable
+# Initialize the OpenAI client, reading the API Key from the environment
+client = OpenAI(
+ # Default endpoint; adjust for the region your service runs in
+ base_url="https://ark.cn-beijing.volces.com/api/v3/bots",
+ # NOTE: the key is hardcoded here rather than read from ARK_API_KEY
+ api_key="5017c24f-581f-48fb-abef-8ac4654e9018",
+)
+
+# Non-streaming:
+print("----- standard request -----")
+completion = client.chat.completions.create(
+ model="bot-20250512103613-6rwj8", # bot-20250512103613-6rwj8 为您当前的智能体的ID,注意此处与Chat API存在差异。差异对比详见 SDK使用指南
+ messages=[
+ {"role": "system", "content": "你是DeepSeek,是一个 AI 人工智能助手"},
+ {"role": "user", "content": "常见的十字花科植物有哪些?"},
+ ],
+)
+print(completion.choices[0].message.content)
+if hasattr(completion, "references"):
+ print(completion.references)
+if hasattr(completion.choices[0].message, "reasoning_content"):
+ print(completion.choices[0].message.reasoning_content) # for R1 models, print the reasoning content
+
+
+# # Streaming:
+# print("----- streaming request -----")
+# stream = client.chat.completions.create(
+# model="bot-20250512103613-6rwj8", # bot-20250512103613-6rwj8 为您当前的智能体的ID,注意此处与Chat API存在差异。差异对比详见 SDK使用指南
+# messages=[
+# {"role": "system", "content": "你是DeepSeek,是一个 AI 人工智能助手"},
+# {"role": "user", "content": "常见的十字花科植物有哪些?"},
+# ],
+# stream=True,
+# )
+# for chunk in stream:
+# if hasattr(chunk, "references"):
+# print(chunk.references)
+# if not chunk.choices:
+# continue
+# if chunk.choices[0].delta.content:
+# print(chunk.choices[0].delta.content, end="")
+# elif hasattr(chunk.choices[0].delta, "reasoning_content"):
+# print(chunk.choices[0].delta.reasoning_content)
+print()
\ No newline at end of file
--
Gitblit v1.9.3