This article walks through Gradio testing --> Gradio port mapping --> visualizing the GPT4V API --> the Gemini Pro, Claude, and Qwen APIs. We hope it offers a useful reference for developers facing similar problems; follow along!
Gradio Test
import gradio as gr
import time

def demo_test(text, image=None):
    time.sleep(1)  # pause briefly to simulate processing
    return text, image if image is not None else None

# create the Gradio interface
iface = gr.Interface(
    fn=demo_test,
    inputs=[gr.Textbox(label="输入文本"), gr.Image(type="pil", label="上传图片")],
    outputs=[gr.Textbox(label="输出文本"), gr.Image(type="pil", label="输出图片")]
)

iface.launch(server_name="0.0.0.0", server_port=1234)
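Once launched, the demo listens on all interfaces at port 1234. If the script runs on a remote server, the port mapping described in the next section lets you open it from a local browser.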
Gradio Port Mapping
Run the following on the local machine:
-L: specifies that the remote machine's port is 1234 and the local machine's port is 8888.
username: the username on the remote machine
ip_address: the IP address of the remote machine
-p: the SSH port of the remote machine (usually 22)
ssh -CNg -L localhost:8888:0.0.0.0:1234 username@ip_address -p ssh_port
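With the tunnel established, opening http://localhost:8888 in the local browser reaches the Gradio app that is listening on port 1234 of the remote machine.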
Visualizing the GPT4V API with Gradio
1. GPT4V
Note: a proxy port needs to be set so the API can be reached.
import gradio as gr
import requests
import os
import base64
import io

# set a proxy to make sure the API can be reached
# os.environ["http_proxy"] = "127.0.0.1:58591"
# os.environ["https_proxy"] = "127.0.0.1:58591"

# your OpenAI API key
api_key = "sk-"

# helper: encode a PIL image object as base64
def encode_image(image):
    if image is None:
        return None  # no image, return None
    buffered = io.BytesIO()
    try:
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
        return img_str
    except Exception as e:
        print(f"编码图像时出错: {e}")
        return None

# handle the GPT-4V API request
def demo_test(text, image=None):
    message_content = [{"type": "text", "text": text}]
    if image is not None:
        base64_image = encode_image(image)
        if base64_image is not None:
            message_content.append({
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}
            })
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [{"role": "user", "content": message_content}],
        "max_tokens": 3000
    }
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    response_text = response.json()["choices"][0]["message"]["content"] if response.ok else "处理请求时出错"
    print(response_text)
    return response_text, image if image is not None else None

# create the Gradio interface
iface = gr.Interface(
    fn=demo_test,
    inputs=[gr.Textbox(label="输入文本"), gr.Image(type="pil", label="上传图片")],
    outputs=[gr.Textbox(label="输出文本"), gr.Image(type="pil", label="输出图片")]
)

iface.launch()
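Before spending API calls, it can be worth sanity-checking the base64 helper on its own. The following is a minimal sketch, assuming encode_image from the block above is already defined in the session; it uses a synthetic PIL image rather than a real upload and decodes the result to confirm that the data-URL payload demo_test would send is valid JPEG data.

import base64
import io
from PIL import Image

# build a small in-memory test image instead of uploading one through Gradio
test_img = Image.new("RGB", (64, 64), color="red")

# encode with the helper defined above, then decode it back
encoded = encode_image(test_img)
decoded = Image.open(io.BytesIO(base64.b64decode(encoded)))
print(decoded.format, decoded.size)  # expected: JPEG (64, 64)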
Gemini Pro, Claude, and Qwen APIs
1. Gemini Pro
# setup
import google.generativeai as genai

genai.configure(api_key='')  # fill in your own api_key

# list the available models
for m in genai.list_models():
    print(m.name)
    print(m.supported_generation_methods)
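The listing should include gemini-pro-vision, the multimodal model instantiated in the next block, along with its supported_generation_methods.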
import PIL.Image
import os
# create a model instance
model = genai.GenerativeModel('gemini-pro-vision')

# folder containing the images
folder_path = ''

# results file
results_file = ''

count = 0

# iterate over the images in the folder
for filename in os.listdir(folder_path):
    if filename.endswith('.jpg') or filename.endswith('.png'):  # check that the file is an image
        # image path
        image_path = os.path.join(folder_path, filename)
        img = PIL.Image.open(image_path)

        # ask the model about the image
        question = "描述一下这张图像"
        response = model.generate_content([question, img], stream=True)
        response.resolve()

        # write the result to the file
        with open(results_file, 'a') as file:
            file.write(f"{filename} {response.text}\n")

        # update the counter
        count += 1

        # print progress every 5 images
        if count % 5 == 0:
            print(f"已处理 {count} 张图像")

print("处理完成!")
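Each line of the results file holds the filename followed by the model's description. Assuming the descriptions come back as single lines and results_file is the same path set above, a short sketch like this can be used to skim the output afterwards:

# read back the results written by the loop above; each line is "<filename> <description>"
with open(results_file, 'r') as f:
    for line in f:
        name, _, desc = line.partition(' ')
        print(f"{name}: {desc.strip()[:80]}")  # show the first 80 characters of each description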
2. Claude
2.1 Registration
- Registering gives free access to the web chat version
- $5 of credit for using Claude 3 Opus
A virtual phone number can be purchased with Alipay: https://sms-man.com/cn
2.2 API
…
3. Qwen
import os

# set the environment variable
os.environ['DASHSCOPE_API_KEY'] = 'sk-'

# afterwards you can use this environment variable
api_key = os.environ['DASHSCOPE_API_KEY']

from dashscope import MultiModalConversation

def call_with_local_file():
    """Sample of use local file.
    linux&mac file schema: file:///home/images/test.png
    windows file schema: file://D:/images/abc.png
    """
    local_file_path1 = 'file:///opt/data/private/434103892.jpg'
    messages = [
        {
            'role': 'system',
            'content': [{'text': 'You are a helpful assistant.'}]
        },
        {
            'role': 'user',
            'content': [
                {'image': local_file_path1},
                {'text': '图片里有什么东西?'},
            ]
        }
    ]
    response = MultiModalConversation.call(model='qwen-vl-max', messages=messages)
    # print(response)
    text_content = response['output']['choices'][0]['message']['content'][0]['text']
    print(text_content)

if __name__ == '__main__':
    call_with_local_file()
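The sample above indexes straight into response['output'], which fails with an unhelpful error if the call itself fails (bad key, unreadable file, etc.). Below is a slightly more defensive sketch, assuming the response object exposes status_code, code, and message as in the DashScope samples; call_with_local_file_checked is a hypothetical helper name.

from http import HTTPStatus
from dashscope import MultiModalConversation

def call_with_local_file_checked(messages):
    # same call as above, but check the status before indexing into the result
    response = MultiModalConversation.call(model='qwen-vl-max', messages=messages)
    if response.status_code == HTTPStatus.OK:
        print(response['output']['choices'][0]['message']['content'][0]['text'])
    else:
        # error code and message returned by the service
        print(response.code, response.message)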
That wraps up this article on Gradio testing --> Gradio port mapping --> visualizing the GPT4V API --> the Gemini Pro, Claude, and Qwen APIs. We hope it proves helpful to fellow developers!