from flask import Flask, render_template, request
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

app = Flask(__name__)
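
# Load the fine-tuned tokenizer and model from the local checkpoint directory.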
model_path = "./finetuned_codegen"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float32)
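
# The tokenizer has no dedicated pad token; reuse EOS so padding works.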
tokenizer.pad_token = tokenizer.eos_token
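
# Run inference on CPU.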
device = torch.device("cpu")
model.to(device)
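
# Expects a Jinja template at templates/index.html with a form field named
# "prompt" and a placeholder for generated_code.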
@app.route("/", methods=["GET", "POST"])
def index():
    generated_code = ""
    prompt = ""
    if request.method == "POST":
        prompt = request.form["prompt"]
        inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128).to(device)
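        # Sample one completion with conservative settings; note that
        # max_length counts prompt tokens plus generated tokens.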
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_length=200,
                num_return_sequences=1,
                pad_token_id=tokenizer.eos_token_id,
                do_sample=True,
                temperature=0.2,
                top_p=0.95,
                top_k=50,
                no_repeat_ngram_size=3,
            )
        generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
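
        # The decoded output echoes the prompt; strip it to keep only the completion.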
        if generated_code.startswith(prompt):
            generated_code = generated_code[len(prompt):].strip()
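
        # Keep only the first generated line.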
        generated_code = generated_code.split("\n", 1)[0].strip()
    return render_template("index.html", generated_code=generated_code, prompt=prompt)

if __name__ == "__main__":
    app.run(debug=True)