zylj committed
Commit 0aaccff
1 Parent(s): 36e0d8f
Files changed (3)
  1. Dockerfile +1 -1
  2. Dockerfile-13b +1 -1
  3. Dockerfile-7b +1 -1
Dockerfile CHANGED
@@ -18,7 +18,7 @@ RUN pip install -r requriments.txt \
  && git clone https://huggingface.co/lmsys/${vicuna_diff} \
  && git clone https://huggingface.co/decapoda-research/${llama_version} \
  && pip install git+https://github.com/lm-sys/[email protected] \
- && sed -i 's/LLaMATokenizer/LlaMATokenizer/' ${llama_version}/tokenizer_config.json \
+ && sed -i 's/LLaMATokenizer/LlamaTokenizer/' ${llama_version}/tokenizer_config.json \
  && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
  && sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml

Dockerfile-13b CHANGED
@@ -18,7 +18,7 @@ RUN pip install -r requriments-13b.txt \
  && git clone https://huggingface.co/lmsys/${vicuna_diff} \
  && git clone https://huggingface.co/decapoda-research/${llama_version} \
  && pip install git+https://github.com/lm-sys/[email protected] \
- && sed -i 's/LLaMATokenizer/LlaMATokenizer/' ${llama_version}/tokenizer_config.json \
+ && sed -i 's/LLaMATokenizer/LlamaTokenizer/' ${llama_version}/tokenizer_config.json \
  && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
  && sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml

Dockerfile-7b CHANGED
@@ -18,7 +18,7 @@ RUN pip install -r requriments.txt \
  && git clone https://huggingface.co/lmsys/${vicuna_diff} \
  && git clone https://huggingface.co/decapoda-research/${llama_version} \
  && pip install git+https://github.com/lm-sys/[email protected] \
- && sed -i 's/LLaMATokenizer/LlaMATokenizer/' ${llama_version}/tokenizer_config.json \
+ && sed -i 's/LLaMATokenizer/LlamaTokenizer/' ${llama_version}/tokenizer_config.json \
  && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
  && sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml

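
All three Dockerfiles receive the same one-line fix: the earlier sed rewrote LLaMATokenizer to LlaMATokenizer, which still does not match the LlamaTokenizer class name that transformers expects, so the patched tokenizer_config.json kept the wrong casing; the new substitution writes LlamaTokenizer. Below is a minimal sketch of the intended effect, assuming the decapoda-research checkpoint ships the legacy class name under a tokenizer_class key (the key name and shipped value shown here are assumptions for illustration, not taken from this repo):

# Sketch only; ${llama_version} is the model directory cloned earlier in the Dockerfile.
grep tokenizer_class ${llama_version}/tokenizer_config.json
#   "tokenizer_class": "LLaMATokenizer"     <- legacy name as shipped (assumed)
sed -i 's/LLaMATokenizer/LlamaTokenizer/' ${llama_version}/tokenizer_config.json
grep tokenizer_class ${llama_version}/tokenizer_config.json
#   "tokenizer_class": "LlamaTokenizer"     <- casing the transformers tokenizer class uses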