import gradio as gr
import requests
import os

## Bloom Inference API
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}


def text_generate(prompt, generated_txt):
    # Prints to debug the code
    print(f"*****Inside text_generate - Prompt is :{prompt}")
    json_ = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            # "max_new_tokens": 64,
            "return_full_text": True,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")

    # Keep only the text before the next "Q:" marker, if the model produced one
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")

    # Trim a trailing "Output:" section or anything after a blank line
    if '\nOutput:' in solution:
        final_solution = solution.split("\nOutput:")[0]
        print(f"Response after removing output is: {final_solution}")
    elif '\n\n' in solution:
        final_solution = solution.split("\n\n")[0]
        print(f"Response after removing new line entries is: {final_solution}")
    else:
        final_solution = solution

    # Append the new generation to the text accumulated so far
    if len(generated_txt) == 0:
        display_output = final_solution
    else:
        display_output = generated_txt[:-len(prompt)] + final_solution

    # The newly generated portion becomes the prompt for the next cycle
    new_prompt = final_solution[len(prompt):]
    print(f"new prompt for next cycle is : {new_prompt}")
    print(f"display_output for printing on screen is : {display_output}")

    # If nothing new was generated, fall back to the last sentence of the
    # displayed text so the next cycle does not send a blank prompt
    if len(new_prompt) == 0:
        temp_text = display_output[::-1]
        print(f"What is the last character of sentence? : {temp_text[0]}")
        if temp_text[1] == '.':
            first_period_loc = temp_text[2:].find('.') + 1
            print(f"Location of last Period is: {first_period_loc}")
            new_prompt = display_output[-first_period_loc:-1]
            print(f"Not sending blank as prompt so new prompt for next cycle is : {new_prompt}")
        else:
            print("HERE")
            first_period_loc = temp_text.find('.')
            print(f"Location of last Period is : {first_period_loc}")
            new_prompt = display_output[-first_period_loc:-1]
            print(f"Not sending blank as prompt so new prompt for next cycle is : {new_prompt}")
        display_output = display_output[:-1]

    return display_output, new_prompt


demo = gr.Blocks()

with demo:
    gr.Markdown("