Update README.md
README.md CHANGED

````diff
@@ -53,7 +53,7 @@ A typical usage flow:
 import transformers
 import torch
 
-model_id = "
+model_id = "/mnt/bn/daoguang/ckpts/Bytedance/Doubao-Coder-base/P6Dense"
 
 pipeline = transformers.pipeline(
     "text-generation",
@@ -67,9 +67,9 @@ prefix = "def add_numbers(a, b):\n "
 suffix = "\n return result"
 
 # Combine prefix and suffix following the FIM format
-fim_input =
+fim_input = '<[PLHD125_never_used_51bce0c785ca2f68081bfa7d91973934]>' + suffix + '<[PLHD124_never_used_51bce0c785ca2f68081bfa7d91973934]>' + prefix + '<[PLHD126_never_used_51bce0c785ca2f68081bfa7d91973934]>'
 
-output = pipeline(fim_input, max_new_tokens=
+output = pipeline(fim_input, max_new_tokens=512)
 print(output[0]["generated_text"])
 ```
 
````
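For reference, a minimal sketch of the README snippet as it reads after this change. The `pipeline(...)` keyword arguments other than the model id (`torch_dtype`, `device_map`) are assumptions, since they are not visible in the diff; the `prefix` value is taken from the hunk header.

```python
import transformers
import torch

# Checkpoint path introduced by this commit
model_id = "/mnt/bn/daoguang/ckpts/Bytedance/Doubao-Coder-base/P6Dense"

# torch_dtype and device_map are assumptions; the README's actual
# pipeline arguments are not shown in this diff.
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Code to be completed: prefix (before the gap) and suffix (after the gap)
prefix = "def add_numbers(a, b):\n "
suffix = "\n return result"

# Combine prefix and suffix following the FIM format: the sentinel tokens
# wrap the suffix first, then the prefix, and generation continues after
# the final sentinel.
fim_input = (
    '<[PLHD125_never_used_51bce0c785ca2f68081bfa7d91973934]>' + suffix +
    '<[PLHD124_never_used_51bce0c785ca2f68081bfa7d91973934]>' + prefix +
    '<[PLHD126_never_used_51bce0c785ca2f68081bfa7d91973934]>'
)

output = pipeline(fim_input, max_new_tokens=512)
print(output[0]["generated_text"])
```

Note that this FIM layout places the suffix before the prefix, so the model is expected to produce the missing middle of the function after the final sentinel token.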