Skip to content

Commit de4ef61

Browse files
committed
Fix linting errors (#63)
1 parent 9de531d commit de4ef61

1 file changed

Lines changed: 32 additions & 14 deletions

File tree

vcache/vcache_policy/strategies/benchmark_iid_verified.py

Lines changed: 32 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,13 @@
11
import logging
22
import os
33
import queue
4-
import random
54
import threading
65
from concurrent.futures import ThreadPoolExecutor
76
from enum import Enum
8-
from typing import Dict, List, Optional, Tuple
7+
from typing import Optional
98

109
import numpy as np
11-
import statsmodels.api as sm
12-
from scipy.special import expit
1310
from scipy.stats import norm
14-
from sklearn.linear_model import LogisticRegression
1511
from typing_extensions import override
1612

1713
from vcache.config import VCacheConfig
@@ -497,23 +493,45 @@ def select_action(
497493
negative_samples = similarities[labels==0].reshape(-1,1,1)
498494
labels = labels.reshape(-1,1,1)
499495
tholds = self.thold_grid.reshape(1,-1,1)
500-
deltap = self.delta * (num_negative_samples + num_positive_samples)/num_negative_samples
496+
deltap = (
497+
self.delta * (num_negative_samples + num_positive_samples)
498+
) / num_negative_samples
501499

502500
epsilon = self.epsilon_grid[self.epsilon_grid < deltap].reshape(1,1,-1)
503501

504-
cdf_estimate = np.sum(negative_samples < tholds, axis=0, keepdims=True) / num_negative_samples # (1, tholds, 1)
505-
cdf_ci_lower, cdf_ci_upper = self.wilson_proportion_ci(cdf_estimate, num_negative_samples, confidence=1-epsilon) # (1, tholds, epsilon)
506-
507-
pc_adjusted = 1 - (deltap - epsilon) / (1 - epsilon) # adjust for positive samples (1,1,epsilon)
502+
cdf_estimate = (
503+
np.sum(negative_samples < tholds, axis=0, keepdims=True) /
504+
num_negative_samples
505+
) # (1, tholds, 1)
506+
cdf_ci_lower, cdf_ci_upper = self.wilson_proportion_ci(
507+
cdf_estimate, num_negative_samples, confidence=1 - epsilon
508+
) # (1, tholds, epsilon)
509+
510+
# adjust for positive samples (1,1,epsilon)
511+
pc_adjusted = 1 - (deltap - epsilon) / (1 - epsilon)
508512

509513

510-
t_hats = (np.sum(cdf_estimate > pc_adjusted, axis=1, keepdims=True) == 0) * 1.0 + (1 - (np.sum(cdf_estimate > pc_adjusted, axis=1, keepdims=True) == 0)) * self.thold_grid[np.argmax(cdf_estimate > pc_adjusted, axis=1, keepdims=True)]
511-
t_primes = (np.sum(cdf_ci_lower > pc_adjusted, axis=1, keepdims=True) == 0) * 1.0 + (1 - (np.sum(cdf_ci_lower > pc_adjusted, axis=1, keepdims=True) == 0)) * self.thold_grid[np.argmax(cdf_ci_lower > pc_adjusted, axis=1, keepdims=True)]
514+
t_hats = (
515+
(np.sum(cdf_estimate > pc_adjusted, axis=1, keepdims=True) == 0) * 1.0
516+
+ (
517+
1 - (np.sum(cdf_estimate > pc_adjusted, axis=1, keepdims=True) == 0)
518+
)
519+
* self.thold_grid[
520+
np.argmax(cdf_estimate > pc_adjusted, axis=1, keepdims=True)
521+
]
522+
)
523+
t_primes = (
524+
(np.sum(cdf_ci_lower > pc_adjusted, axis=1, keepdims=True) == 0) * 1.0
525+
+ (
526+
1 - (np.sum(cdf_ci_lower > pc_adjusted, axis=1, keepdims=True) == 0)
527+
)
528+
* self.thold_grid[
529+
np.argmax(cdf_ci_lower > pc_adjusted, axis=1, keepdims=True)
530+
]
531+
)
512532

513533
t_hat = np.min(t_hats)
514534
t_prime = np.min(t_primes)
515-
# if t_prime < 1.0:
516-
# print(f"t_hat: {t_hat}, t_prime: {t_prime} num_positive_samples: {num_positive_samples} num_negative_samples: {num_negative_samples}")
517535
metadata.t_prime = t_prime
518536
metadata.t_hat = t_hat
519537
metadata.var_t = -1 # not computed

0 commit comments

Comments (0)