智谱大模型 API 调用

下面是用 Python 调用智谱免费 API 接口的例子,写成函数,类似于:
def get_answer(prompt):
    """Send *prompt* to a self-hosted chat-completions endpoint and return the answer.

    Args:
        prompt: The user message to send to the model.

    Returns:
        The content of the first choice on success, otherwise a short
        error message string describing the failure (matching the
        original error-as-string convention of this snippet).
    """
    url = 'http://34.132.32.68:8081/v1/chat/completions'
    headers = {
        'Content-Type': 'application/json',
    }
    data = {
        "model": "Qwen2-72B-int4",
        "messages": [
            {"role": "system", "content": ""},
            {"role": "user", "content": prompt}
        ],
        "temperature": 1.2,
        "max_tokens": 1000,
        "top_k": 20,
        "top_p": 0.8,
        "repetition_penalty": 1.1
    }
    # json= serializes the payload for us; timeout= keeps the call from
    # hanging forever on an unresponsive server.
    try:
        response = requests.post(url, headers=headers, json=data, timeout=60)
    except requests.RequestException as exc:
        # Network-level failures return an error string, consistent with
        # the non-200 branch below, instead of propagating an exception.
        return f"Request failed: {exc}"
    if response.status_code == 200:
        response_data = response.json()
        if 'choices' in response_data and len(response_data['choices']) > 0:
            return response_data['choices'][0]['message']['content']
        return "No content returned."
    return f"Request failed with status code {response.status_code}"
也可以通过官方 zhipuai SDK 进行函数调用:
from zhipuai import ZhipuAI
def get_answer(prompt):
    """Query Zhipu AI's free GLM-4-Flash model and return the complete answer.

    The response is requested in streaming mode; the streamed deltas are
    collected and joined into a single string before returning.

    Args:
        prompt: The user message to send to the model.

    Returns:
        The complete generated text.
    """
    # SECURITY NOTE(review): never hard-code API keys in source; load them
    # from an environment variable or a secrets manager in real deployments.
    api_key = '51c3570a50bbaca5f9a1400'
    client = ZhipuAI(api_key=api_key)
    response = client.chat.completions.create(
        model='glm-4-flash',
        messages=[
            {'role': 'user', 'content': prompt},
        ],
        stream=True,
    )
    # Accumulate streamed deltas. The final streamed chunk's delta.content
    # can be None, so fall back to '' to avoid a TypeError on concatenation;
    # ''.join avoids the quadratic cost of repeated string +=.
    pieces = []
    for chunk in response:
        pieces.append(chunk.choices[0].delta.content or '')
    return ''.join(pieces)
# 使用示例
# response_text = get_answer('你好,我是老牛同学,请问你是谁?')
# print(response_text)
更多推荐



所有评论(0)