j10sanders committed
Commit
5569fcb
1 Parent(s): f2cca47

Upload 3 files

Files changed (3)
  1. __init__.py +0 -0
  2. codegen.py +36 -0
  3. wsgi.py +4 -0
__init__.py ADDED
Empty file
codegen.py ADDED
@@ -0,0 +1,36 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+
+ DEVICE = 'cpu'
+ TOKENIZER = None
+ MODEL = None
+
+
+ def setup(model: str, setup_torch: bool = False):
+     global TOKENIZER, MODEL, DEVICE
+     if setup_torch:
+         try:
+             import torch
+             # Use the GPU when it's available; otherwise stay on the CPU
+             DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
+             torch.set_default_tensor_type(torch.cuda.FloatTensor if DEVICE != 'cpu' else torch.FloatTensor)
+         except Exception:
+             print("ERROR: Can't set the default tensor type")
+
+     TOKENIZER = AutoTokenizer.from_pretrained(model)
+     MODEL = AutoModelForCausalLM.from_pretrained(model)
+
+
+ def generate(token: str) -> str:
+     """
+     Generate code with the loaded model from the given input.
+
+     :param token: The prompt text that is tokenized and passed to the model.
+     :return: The generated string.
+     """
+     if TOKENIZER is None or MODEL is None:
+         raise RuntimeError("Model and tokenizer have not been set up.")
+
+     inputs = TOKENIZER(token, return_tensors='pt').to(DEVICE)
+     sample = MODEL.generate(**inputs, max_length=128)
+     return TOKENIZER.decode(sample[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
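
For reference, a minimal usage sketch of codegen.py (not part of this commit). The checkpoint name and prompt below are illustrative assumptions; a CodeGen checkpoint is a natural fit because truncate_before_pattern is supported by the CodeGen tokenizer.

    # Hypothetical usage -- checkpoint and prompt are assumptions, not part of this commit
    from codegen import setup, generate

    setup("Salesforce/codegen-350M-mono")  # assumed CodeGen checkpoint
    print(generate("def fibonacci(n):"))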
wsgi.py ADDED
@@ -0,0 +1,4 @@
+ from rubber_duck.app import app
+
+ if __name__ == '__main__':
+     app.run(host="0.0.0.0", port=5002)
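
wsgi.py imports rubber_duck.app, which is not included in this commit. A minimal sketch of the kind of Flask module it expects follows; the module path comes from the import above, but the route name, payload shape, and checkpoint are assumptions:

    # Hypothetical rubber_duck/app.py -- not in this commit; route and payload shape are assumptions
    from flask import Flask, jsonify, request

    from codegen import setup, generate

    app = Flask(__name__)
    setup("Salesforce/codegen-350M-mono")  # assumed checkpoint, loaded once at import time


    @app.route("/generate", methods=["POST"])
    def generate_code():
        # Expects a JSON body like {"prompt": "def fibonacci(n):"}
        prompt = request.get_json()["prompt"]
        return jsonify({"completion": generate(prompt)})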