Commit b8e12e1

Use inference_device
1 parent: fecfdd1

File tree

1 file changed (+8 -3 lines)

ch05/01_main-chapter-code/ch05.ipynb

Lines changed: 8 additions & 3 deletions
@@ -1519,14 +1519,19 @@
 }
 ],
 "source": [
-"model.to(\"cpu\")\n",
+"# NEW: use CPU here as inference is cheap with \n",
+"# this model and to ensure readers get same results in the\n",
+"# remaining sections of this book\n",
+"inference_device = torch.device(\"cpu\")\n",
+"\n",
+"model.to(inference_device)\n",
 "model.eval()\n",
 "\n",
 "tokenizer = tiktoken.get_encoding(\"gpt2\")\n",
 "\n",
 "token_ids = generate_text_simple(\n",
 "    model=model,\n",
-"    idx=text_to_token_ids(\"Every effort moves you\", tokenizer),\n",
+"    idx=text_to_token_ids(\"Every effort moves you\", tokenizer).to(inference_device),\n",
 "    max_new_tokens=25,\n",
 "    context_size=GPT_CONFIG_124M[\"context_length\"]\n",
 ")\n",
@@ -2030,7 +2035,7 @@
 "\n",
 "token_ids = generate(\n",
 "    model=model,\n",
-"    idx=text_to_token_ids(\"Every effort moves you\", tokenizer),\n",
+"    idx=text_to_token_ids(\"Every effort moves you\", tokenizer).to(inference_device),\n",
 "    max_new_tokens=15,\n",
 "    context_size=GPT_CONFIG_124M[\"context_length\"],\n",
 "    top_k=25,\n",
