import os

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face access token, read from the environment (needed if the model repo is gated/private).
token = os.getenv("hf_token")


@st.cache_resource
def load_model():
    """Load the tokenizer and model once and cache them across Streamlit reruns."""
    model_name = "robzchhangte/bloomz-dv5-with-mztok"
    # model_name = "robzchhangte/10-vanillagpt2-ft-INS-dv5"
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
    return tokenizer, model


tokenizer, model = load_model()

st.title("📝 Mizo Text Generator")

prompt = st.text_area("Enter your prompt (in Mizo):", height=150)
st.text("Example: Lirthei pung nasa chu hmasawnna rah a nih rualin harsatna tam tak..")

generate_button = st.button("Generate Text")

if generate_button and prompt:
    with st.spinner("Generating text..."):
        inputs = tokenizer.encode(prompt, return_tensors="pt")
        outputs = model.generate(
            inputs,
            max_length=50,  # total length, counting the prompt tokens as well
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    st.subheader("Generated Text:")
    st.write(generated_text)
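
# Usage sketch (assumption: this script is saved as app.py; "your-hf-token" is a
# placeholder, not a real credential). The script reads the token from the
# lowercase "hf_token" environment variable, so export it before launching:
#
#   export hf_token=your-hf-token
#   streamlit run app.py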