Update README.md
Browse files
README.md
CHANGED
@@ -80,10 +80,11 @@ print(summary)
|
|
80 |
|
81 |
The model should currently close its summary with a `</CARD_SUMMARY>` tag (cooking some more with this...), so you can also use it as a stopping criterion when using `pipeline` inference.
|
82 |
|
83 |
-
```
|
84 |
from transformers import pipeline, StoppingCriteria, StoppingCriteriaList
|
85 |
import torch
|
86 |
|
|
|
87 |
class StopOnTokens(StoppingCriteria):
|
88 |
def __init__(self, tokenizer, stop_token_ids):
|
89 |
self.stop_token_ids = stop_token_ids
|
@@ -97,6 +98,7 @@ class StopOnTokens(StoppingCriteria):
|
|
97 |
return True
|
98 |
return False
|
99 |
|
|
|
100 |
# Initialize pipeline
|
101 |
pipe = pipeline("text-generation", "davanstrien/Smol-Hub-tldr")
|
102 |
tokenizer = pipe.tokenizer
|
|
|
80 |
|
81 |
The model should currently close its summary with a `</CARD_SUMMARY>` tag (cooking some more with this...), so you can also use it as a stopping criterion when using `pipeline` inference.
|
82 |
|
83 |
+
```python
|
84 |
from transformers import pipeline, StoppingCriteria, StoppingCriteriaList
|
85 |
import torch
|
86 |
|
87 |
+
|
88 |
class StopOnTokens(StoppingCriteria):
|
89 |
def __init__(self, tokenizer, stop_token_ids):
|
90 |
self.stop_token_ids = stop_token_ids
|
|
|
98 |
return True
|
99 |
return False
|
100 |
|
101 |
+
|
102 |
# Initialize pipeline
|
103 |
pipe = pipeline("text-generation", "davanstrien/Smol-Hub-tldr")
|
104 |
tokenizer = pipe.tokenizer
|