Zerx966 committed on
Commit
6947a9d
1 Parent(s): ac4471f

Create Gpt

Files changed (1)
  1. Gpt +28 -0
Gpt ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+ # Load the pre-trained models and tokenizers
+ wormgpt_model = GPT2LMHeadModel.from_pretrained("wormgpt")
+ wormgpt_tokenizer = GPT2Tokenizer.from_pretrained("wormgpt")
+ fraudgpt_model = GPT2LMHeadModel.from_pretrained("fraudgpt")
+ fraudgpt_tokenizer = GPT2Tokenizer.from_pretrained("fraudgpt")
+ xxxgpt_model = GPT2LMHeadModel.from_pretrained("xxxgpt")
+ xxxgpt_tokenizer = GPT2Tokenizer.from_pretrained("xxxgpt")
+ evilgpt_model = GPT2LMHeadModel.from_pretrained("evilgpt")
+ evilgpt_tokenizer = GPT2Tokenizer.from_pretrained("evilgpt")
+ # Function to generate text from a given prompt using the specified model
+ def generate_text(prompt, model, tokenizer, max_length=50):
+     input_ids = tokenizer.encode(prompt, return_tensors="pt")
+     output = model.generate(input_ids, max_length=max_length, num_return_sequences=1)
+     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+     return generated_text
+ # Function to generate text from a given prompt using all four models
+ def generate_uncensored_text(prompt, max_length=50):
+     wormgpt_text = generate_text(prompt, wormgpt_model, wormgpt_tokenizer, max_length)
+     fraudgpt_text = generate_text(prompt, fraudgpt_model, fraudgpt_tokenizer, max_length)
+     xxxgpt_text = generate_text(prompt, xxxgpt_model, xxxgpt_tokenizer, max_length)
+     evilgpt_text = generate_text(prompt, evilgpt_model, evilgpt_tokenizer, max_length)
+     return wormgpt_text + "\n" + fraudgpt_text + "\n" + xxxgpt_text + "\n" + evilgpt_text
+ # Example usage
+ prompt = "I want to generate some uncensored text."
+ uncensored_text = generate_uncensored_text(prompt)
+ print(uncensored_text)