Skip to content

Commit db03412

Browse files
committed
updated blog
1 parent 8906e88 commit db03412

2 files changed

Lines changed: 10 additions & 10 deletions

File tree

demo/local_llm_verifier.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
import logging
1212
import os
1313
from dataclasses import dataclass
14-
from typing import Any, Optional
14+
from typing import Any
1515

16-
from transformers import AutoModelForCausalLM, AutoTokenizer # type: ignore
16+
from transformers import AutoModelForCausalLM, AutoTokenizer
1717

1818
logger = logging.getLogger(__name__)
1919

@@ -64,8 +64,8 @@ def __init__(
6464
self.max_tokens = max_tokens
6565
self.temperature = temperature
6666

67-
self._model: Optional[Any] = None
68-
self._tokenizer: Optional[Any] = None
67+
self._model: Any | None = None
68+
self._tokenizer: Any | None = None
6969
self._initialized = False
7070

7171
def _lazy_init(self) -> None:
@@ -250,24 +250,24 @@ def _generate(self, system_prompt: str, user_prompt: str) -> str:
250250
]
251251

252252
# Apply chat template
253-
text = self._tokenizer.apply_chat_template( # type: ignore
253+
text = self._tokenizer.apply_chat_template(
254254
messages, tokenize=False, add_generation_prompt=True
255255
)
256256

257257
# Tokenize
258-
inputs = self._tokenizer([text], return_tensors="pt").to(self._model.device) # type: ignore
258+
inputs = self._tokenizer([text], return_tensors="pt").to(self._model.device)
259259

260260
# Generate
261-
outputs = self._model.generate( # type: ignore
261+
outputs = self._model.generate(
262262
**inputs,
263263
max_new_tokens=self.max_tokens,
264264
temperature=self.temperature if self.temperature > 0 else None,
265265
do_sample=self.temperature > 0,
266-
pad_token_id=self._tokenizer.eos_token_id, # type: ignore
266+
pad_token_id=self._tokenizer.eos_token_id,
267267
)
268268

269269
# Decode
270-
generated_text: str = self._tokenizer.decode(outputs[0], skip_special_tokens=True) # type: ignore
270+
generated_text: str = self._tokenizer.decode(outputs[0], skip_special_tokens=True)
271271

272272
# Extract response (everything after the user prompt)
273273
# This handles the chat template format

demo/secure_browser_demo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,9 @@
1313
import os
1414
import sys
1515
import uuid
16+
from collections.abc import Callable
1617
from datetime import datetime
1718
from pathlib import Path
18-
from typing import Callable
1919

2020
from dotenv import load_dotenv
2121
from rich.console import Console

0 commit comments

Comments (0)