import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load tokenizer and model (simulating EvoTransformer with a GPT-2-like architecture)
model_name = "gpt2"  # Replace with a fine-tuned EvoTransformer model on Hugging Face if available
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

# Mock EvoTransformer architecture traits
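# Note: these values are for display only. They describe the intended EvoTransformer
# configuration, not the "gpt2" checkpoint actually loaded above (which has 12 layers,
# 12 attention heads, and roughly 124M parameters).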
architecture = {
    "layers": 6,
    "heads": 8,
    "ffn_dim": 2048,
    "parameters": "58M"
}
def generate_response(user_input, max_new_tokens=100):
    # Wrap the user message in a simple conversational prompt
    prompt = f"User: {user_input} Assistant: "
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)

    # Generate a continuation of the prompt
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # avoids the missing-attention-mask warning for GPT-2
            max_new_tokens=max_new_tokens,  # reply budget, independent of prompt length
            num_return_sequences=1,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id  # GPT-2 has no pad token; reuse EOS
        )

    # Decode only the newly generated tokens, dropping the prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    # Format output with architecture details
    arch_info = (
        f"**Model Architecture**\n"
        f"- Layers: {architecture['layers']}\n"
        f"- Attention Heads: {architecture['heads']}\n"
        f"- FFN Dimension: {architecture['ffn_dim']}\n"
        f"- Parameters: {architecture['parameters']}"
    )
    return f"**Response:** {response}\n\n{arch_info}"
# Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs="markdown",
    title="EvoTransformer Chat Demo",
    description="Chat with a simplified EvoTransformer model, designed to evolve Transformer architectures. Enter a message to get a response and view model details."
)
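# Optional variant (a sketch, not part of the original app): expose the generation
# budget as a second input so users can control response length. Uses only standard
# Gradio components; cast the slider value to int inside generate_response if needed.
#
# iface = gr.Interface(
#     fn=generate_response,
#     inputs=[
#         gr.Textbox(lines=2, placeholder="Type your message here..."),
#         gr.Slider(minimum=20, maximum=200, value=100, step=10, label="Max new tokens"),
#     ],
#     outputs="markdown",
#     title="EvoTransformer Chat Demo",
# )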
if __name__ == "__main__":
    iface.launch()
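# Deployment note (assumption, not part of the original file): when this runs as a
# Hugging Face Space, a requirements.txt next to app.py should list the non-bundled
# dependencies (at least transformers and torch); a missing requirements.txt is a
# common cause of the "Runtime error" status the Space was showing.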