wenhuach committed on
Commit
51ecd16
·
verified ·
1 Parent(s): e772e91

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -2
README.md CHANGED
@@ -49,8 +49,7 @@ inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True, pa
49
  # conduct text completion
50
  outputs = model.generate(
51
  **inputs,
52
- # max_new_tokens=65536, ## change to this to follow official usage
53
- max_new_tokens=512
54
  )
55
  generated_ids = [
56
  output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs["input_ids"], outputs)
 
49
  # conduct text completion
50
  outputs = model.generate(
51
  **inputs,
52
+ max_new_tokens=65536,
 
53
  )
54
  generated_ids = [
55
  output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs["input_ids"], outputs)