import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# deepseek-coder-1.3b-base is a decoder-only (causal) LM, so it must be
# loaded with AutoModelForCausalLM and served via the "text-generation"
# pipeline. The original AutoModelForSeq2SeqLM / "text2text-generation"
# combination is only valid for encoder-decoder checkpoints (e.g. FLAN-T5)
# and does not work for this model.
model_id = "deepseek-ai/deepseek-coder-1.3b-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1,  # -1 = run on CPU
)


def generate_frontend_code(instruction, max_tokens=256, temperature=0.7):
    """Generate HTML/CSS/JS code from a natural-language instruction.

    Args:
        instruction: Description of the front-end snippet to produce.
        max_tokens: Upper bound on newly generated tokens. Gradio sliders
            deliver floats, so the value is cast to int before use.
        temperature: Sampling temperature; higher values give more varied
            output.

    Returns:
        The generated code as a string, with the prompt stripped.
    """
    prompt = f"Write HTML/CSS/JS code for the following instruction:\n{instruction}"
    output = generator(
        prompt,
        max_new_tokens=int(max_tokens),  # max_new_tokens must be an int
        temperature=temperature,
        do_sample=True,
        return_full_text=False,  # don't echo the prompt back in the result
    )
    return output[0]["generated_text"]


# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🌐 Front-End Code Generator")
    instruction = gr.Textbox(
        label="Enter Instruction",
        placeholder="e.g., Create a responsive navbar with dropdown",
        lines=4,
    )
    max_tokens = gr.Slider(64, 512, value=256, label="Max Tokens")
    temperature = gr.Slider(0.1, 1.5, value=0.7, label="Temperature")
    generate_btn = gr.Button("Generate Code")
    code_output = gr.Code(label="Generated HTML/CSS/JS Code")
    generate_btn.click(
        fn=generate_frontend_code,
        inputs=[instruction, max_tokens, temperature],
        outputs=code_output,
    )

# Guard the launch so importing this module doesn't start a server.
if __name__ == "__main__":
    demo.launch()