| | import gradio as gr |
| | import requests |
| | import json |
| |
|
| | |
| |
|
def query_huggingface_api(prompt, api_url, api_key):
    """Send *prompt* to a Hugging Face Inference API endpoint and return generated text.

    Parameters
    ----------
    prompt : str
        User prompt, forwarded as the model's ``inputs`` payload.
    api_url : str
        Full inference endpoint URL for the target model.
    api_key : str
        Hugging Face API token, sent as a Bearer credential.

    Returns
    -------
    str
        The model's ``generated_text`` on success; otherwise a human-readable
        error message (callers display errors as ordinary text).
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    payload = {"inputs": prompt}
    try:
        # Timeout prevents the Gradio callback from hanging forever on a
        # stalled endpoint; RequestException covers DNS/connect/timeout errors.
        response = requests.post(api_url, headers=headers, json=payload, timeout=30)
    except requests.RequestException as exc:
        return f"Error: API request failed ({exc})"

    if response.status_code == 200:
        try:
            return response.json()[0]['generated_text']
        except (ValueError, KeyError, IndexError):
            # Unexpected payload shape (non-JSON body, error dict, empty list)
            # — report rather than crash the UI callback.
            return "Error: unexpected response format from the API"
    else:
        return f"Error: API request failed with status code {response.status_code}"
| |
|
# Define the Gradio interface
| |
|
def chat_with_model(user_input):
    """Gradio callback: forward *user_input* to the configured HF model.

    The endpoint URL and API token are read from the ``HF_MODEL_URL`` and
    ``HF_API_KEY`` environment variables when set, falling back to the
    original placeholder values so existing behavior is unchanged.

    Parameters
    ----------
    user_input : str
        Text typed into the Gradio textbox.

    Returns
    -------
    str
        Model output (or an error message) from ``query_huggingface_api``.
    """
    import os  # local import keeps the file's top-level import block untouched

    api_url = os.environ.get(
        "HF_MODEL_URL",
        "https://api-inference.huggingface.co/models/YOUR_MODEL_NAME",
    )
    # NOTE(review): avoid committing a real token here — prefer the env var.
    api_key = os.environ.get("HF_API_KEY", "YOUR_HF_API_KEY")
    return query_huggingface_api(user_input, api_url, api_key)
| |
|
# Build the Gradio UI: a single text input and text output wired to
# chat_with_model, shown under the title "ChatDev AI".
iface = gr.Interface(fn=chat_with_model, inputs="text", outputs="text", title="ChatDev AI")
| |
|
# Launch the Gradio app
| |
|
# Launch the web server only when run as a script (not on import).
# Fixed: the guard read `if name == "main":`, which raises NameError —
# the dunder underscores were lost; the canonical form is below.
if __name__ == "__main__":
    iface.launch()
| |
|
| |
|