import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "bigcode/starcoderbase-1b"
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# Force CPU mode
device = "cpu"

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)

# Ensure the tokenizer has a pad token set
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Load the model in float32 for CPU compatibility
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HF_TOKEN,
    torch_dtype=torch.float32,
    trust_remote_code=True,
).to(device)  # Explicitly move to CPU


def generate_code(prompt: str, max_tokens: int = 256) -> str:
    # Tokenize the prompt and move the tensors to the target device
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(device)
    output = model.generate(**inputs, max_new_tokens=max_tokens, pad_token_id=tokenizer.pad_token_id)
    return tokenizer.decode(output[0], skip_special_tokens=True)
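
# A minimal usage sketch, assuming this file is run directly as a script;
# the prompt and token budget below are illustrative, not part of the
# original Space.
if __name__ == "__main__":
    demo_prompt = "def fibonacci(n: int) -> int:"
    print(generate_code(demo_prompt, max_tokens=128))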