DavidGF committed on
Commit
62e2925
1 Parent(s): 351ee27

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -49,7 +49,7 @@ tokenizer = model.tokenizer
49
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
50
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
51
  output_ids = model.generate(input_ids, max_length=250)
52
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
53
  ```
54
 
55
 
@@ -66,7 +66,7 @@ tokenizer = model.tokenizer
66
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
67
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
68
  output_ids = model.generate(input_ids ,temperature=0.1, do_sample=True, top_p=0.9,top_k=20, max_length=500)
69
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
70
  ```
71
 
72
  # Call the Python Expert:
@@ -82,7 +82,7 @@ input_text = tokenizer.apply_chat_template(messages, tokenize=False)
82
  print(input_text)
83
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
84
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=400)
85
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
86
  ```
87
 
88
  # Call the SQL expert:
@@ -99,7 +99,7 @@ tokenizer = model.tokenizer
99
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
100
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
101
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=500)
102
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
103
  ```
104
 
105
  # Call the German expert:
@@ -113,7 +113,7 @@ tokenizer = model.tokenizer
113
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
114
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
115
  output_ids = model.generate(input_ids, max_length=150)
116
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
117
  ```
118
 
119
 
 
49
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
50
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
51
  output_ids = model.generate(input_ids, max_length=250)
52
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
53
  ```
54
 
55
 
 
66
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
67
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
68
  output_ids = model.generate(input_ids ,temperature=0.1, do_sample=True, top_p=0.9,top_k=20, max_length=500)
69
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
70
  ```
71
 
72
  # Call the Python Expert:
 
82
  print(input_text)
83
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
84
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=400)
85
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
86
  ```
87
 
88
  # Call the SQL expert:
 
99
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
100
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
101
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=500)
102
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
103
  ```
104
 
105
  # Call the German expert:
 
113
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
114
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
115
  output_ids = model.generate(input_ids, max_length=150)
116
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
117
  ```
118
 
119