
Commit 498a780

pytorchmergebot authored and committed
Fix unused Python variables outside torch/ and test/ (pytorch#136359)
Pull Request resolved: pytorch#136359 Approved by: https://github.com/albanD
1 parent 241bf04 commit 498a780


49 files changed: +39 -86 lines changed
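Every hunk below removes or silences a local binding that is assigned but never read (the pattern flake8/ruff reports as F841): unused `except ... as e` names, dead assignments, and unused `with ... as name` targets. A minimal before/after sketch of the most common case, with a hypothetical function name chosen only for illustration:

    import json
    import warnings


    def parse_summary(raw: str) -> dict:
        try:
            return json.loads(raw)
        # Before: `except json.JSONDecodeError as error:` bound a name that was
        # never used. After: the binding is dropped; the handler behaves the same.
        except json.JSONDecodeError:
            warnings.warn("invalid summary, falling back to an empty dict")
            return {}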

.github/scripts/filter_test_configs.py

Lines changed: 1 addition & 1 deletion
@@ -332,7 +332,7 @@ def process_jobs(
         # The job name from github is in the PLATFORM / JOB (CONFIG) format, so breaking
         # it into its two components first
         current_platform, _ = (n.strip() for n in job_name.split(JOB_NAME_SEP, 1) if n)
-    except ValueError as error:
+    except ValueError:
         warnings.warn(f"Invalid job name {job_name}, returning")
         return test_matrix

.github/scripts/runner_determinator.py

Lines changed: 1 addition & 1 deletion
@@ -258,7 +258,7 @@ def load_yaml(yaml_text: str) -> Any:
     try:
         data = yaml.safe_load(yaml_text)
         return data
-    except yaml.YAMLError as exc:
+    except yaml.YAMLError:
         log.exception("Error loading YAML")
         raise

.github/scripts/test_trymerge.py

Lines changed: 1 addition & 1 deletion
@@ -898,7 +898,7 @@ def test_dont_ignore_flaky_failures(self, *args: Any) -> None:
         repo = DummyGitRepo()
         # Check that failure is classified as flaky but still raises exception
         with warnings.catch_warnings(record=True) as w, self.assertRaises(RuntimeError):
-            rule = find_matching_merge_rule(pr, repo)
+            find_matching_merge_rule(pr, repo)
         self.assertEqual(len(w), 1)
         self.assertIn(
             "1 checks failed but were likely due flakiness or broken trunk",

.github/scripts/trymerge.py

Lines changed: 1 addition & 2 deletions
@@ -1747,7 +1747,7 @@ def get_readable_drci_results(drci_classifications: Any) -> str:
     try:
         print(f"From Dr.CI checkrun summary: {drci_summary}")
         drci_classifications = json.loads(str(drci_summary))
-    except json.JSONDecodeError as error:
+    except json.JSONDecodeError:
         warn("Invalid Dr.CI checkrun summary")
         drci_classifications = {}

@@ -1918,7 +1918,6 @@ def do_revert_prs(
     dry_run: bool = False,
 ) -> None:
     # Prepare and push revert commits
-    commit_shas: List[str] = []
     for commit_sha, pr in shas_and_prs:
         revert_msg = f"\nReverted {pr.get_pr_url()} on behalf of {prefix_with_github_url(author_login)}"
         revert_msg += extra_msg

.github/workflows/_runner-determinator.yml

Lines changed: 1 addition & 1 deletion
@@ -326,7 +326,7 @@ jobs:
     try:
         data = yaml.safe_load(yaml_text)
         return data
-    except yaml.YAMLError as exc:
+    except yaml.YAMLError:
         log.exception("Error loading YAML")
         raise

benchmarks/distributed/rpc/rl/coordinator.py

Lines changed: 0 additions & 1 deletion
@@ -72,7 +72,6 @@ def run_coordinator(self, episodes, episode_steps, queue):
             print(f"Episode {ep} - ", end="")

             n_steps = episode_steps
-            agent_start_time = time.time()

             futs = []
             for ob_rref in self.ob_rrefs:

benchmarks/dynamo/check_accuracy.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 def get_field(csv, model_name: str, field: str):
     try:
         return csv.loc[csv["name"] == model_name][field].item()
-    except Exception as e:
+    except Exception:
         return None

benchmarks/dynamo/check_graph_breaks.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 def get_field(csv, model_name: str, field: str):
     try:
         return csv.loc[csv["name"] == model_name][field].item()
-    except Exception as e:
+    except Exception:
         return None

benchmarks/dynamo/common.py

Lines changed: 5 additions & 8 deletions
@@ -671,7 +671,7 @@ def print_summary_table(data, print_dataframe=False):
                 col.ljust(width),
                 f"gmean={gmean(cdata):.2f}x mean={cdata.mean():.3f}x",
             )
-        except Exception as e:
+        except Exception:
             pass

@@ -3018,7 +3018,7 @@ def record_status(accuracy_status, dynamo_start_stats):
                 )
             ):
                 is_same = False
-        except Exception as e:
+        except Exception:
             # Sometimes torch.allclose may throw RuntimeError
             is_same = False

@@ -3110,7 +3110,7 @@ def record_status(accuracy_status, dynamo_start_stats):
                 tol=tolerance,
             ):
                 is_same = False
-        except Exception as e:
+        except Exception:
             # Sometimes torch.allclose may throw RuntimeError
             is_same = False

@@ -3157,7 +3157,7 @@ def check_tolerance(
             self.init_optimizer(name, current_device, model.parameters())
             optimized_model_iter_fn = optimize_ctx(self.run_n_iterations)
             new_result = optimized_model_iter_fn(model, example_inputs)
-        except Exception as e:
+        except Exception:
             log.exception("")
             print(
                 "TorchDynamo optimized model failed to run because of following error"

@@ -3542,7 +3542,7 @@ def minify_model(

     try:
         shutil.move("repro.py", f"{repro_dir}/{name}_repro.py")
-    except OSError as e:
+    except OSError:
         logging.error("Could not find repro script for model %s", name)
     else:
         logging.info(

@@ -4369,9 +4369,6 @@ def run(runner, args, original_dir=None):
         # Set translation validation on by default on CI accuracy runs.
         torch.fx.experimental._config.translation_validation = True

-    ci = functools.partial(
-        CI, args.backend, training=args.training, dynamic=args.dynamic_shapes
-    )
     if args.ddp:
         assert args.training, "DDP benchmark requires --training mode"
         torch._dynamo.config.optimize_ddp = args.optimize_ddp_mode

benchmarks/dynamo/dist_util.py

Lines changed: 1 addition & 1 deletion
@@ -90,7 +90,7 @@ def model_iter_fn(model, example_inputs, collect_outputs=False):

 def get_model(args):
     if args.torchbench_model:
-        old_cwd = setup_torchbench_cwd()
+        setup_torchbench_cwd()
         module = importlib.import_module(
             f"torchbenchmark.models.{args.torchbench_model}"
         )

benchmarks/dynamo/runner.py

Lines changed: 1 addition & 1 deletion
@@ -1451,7 +1451,7 @@ def update(self):
         RegressionDetector(self.args).generate_comment()
         try:
             RegressionTracker(self.args).diff()
-        except Exception as e:
+        except Exception:
             logging.exception("")
             with open(f"{self.args.output_dir}/gh_regression.txt", "w") as gh_fh:
                 gh_fh.write("")

benchmarks/dynamo/torchbench.py

Lines changed: 0 additions & 1 deletion
@@ -236,7 +236,6 @@ def load_model(
         )
         is_training = self.args.training
         use_eval_mode = self.args.use_eval_mode
-        dynamic_shapes = self.args.dynamic_shapes
         candidates = [
             f"torchbenchmark.models.{model_name}",
             f"torchbenchmark.canary_models.{model_name}",

benchmarks/fastrnns/bench.py

Lines changed: 1 addition & 1 deletion
@@ -205,7 +205,7 @@ def bench(rnn_runners, group_name, print_json=False, sep=" ", **params):
             result_with_no_info = result._replace(info_fwd="None", info_bwd="None")
             print_stderr(pretty_print(result_with_no_info, sep=sep))
             results[name] = result
-        except Exception as e:
+        except Exception:
             if not print_json:
                 raise

benchmarks/fastrnns/factory.py

Lines changed: 2 additions & 2 deletions
@@ -338,8 +338,8 @@ def forward(input, hidden):
         seq_len = len(input.unbind(0))
         hy, cy = new_hidden
         for i in range(seq_len):
-            ln_i_output = ln_i(ln_input1)
-            ln_h_output = ln_h(ln_input1)
+            ln_i(ln_input1)
+            ln_h(ln_input1)
             cy = ln_c(cy)

         return out, (hy, cy)

benchmarks/fastrnns/test_bench.py

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ def cuda_sync(func, *args, **kwargs):
 class TestBenchNetwork:
     # See 'modeldef' fixture, which provides the things to benchmark
     def test_forward(self, modeldef, benchmark):
-        forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)
+        benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)

     def test_backward(self, modeldef, benchmark):
         backward_input = modeldef.forward(*modeldef.inputs)

benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@


 def parse_op_args(op):
-    op_list = op.split(",")
+    op_list = op.split(",")  # noqa: F841


 def print_results(result):

benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py

Lines changed: 2 additions & 2 deletions
@@ -190,9 +190,9 @@ def run_once(model: Callable, inp: InputsType, task: str, v: VType, **kwargs) ->
     func = get_task_func(task)

     if v is not None:
-        res = func(model, inp, v=v, strict=True)
+        func(model, inp, v=v, strict=True)
     else:
-        res = func(model, inp, strict=True)
+        func(model, inp, strict=True)


 def run_once_functorch(

benchmarks/fuser/run_benchmarks.py

Lines changed: 0 additions & 1 deletion
@@ -284,7 +284,6 @@ def run_benchmarks(operators, shapes):
     shapes = [globals()[k] for k in shapes.split(",")]

     print("fuser,device,operator,shape,time")
-    results = []
     for shape, operator in itertools.product(shapes, operators):
         nargs = len(inspect.signature(operator).parameters)
         args = shape()

benchmarks/gpt_fast/mixtral_moe_quantize.py

Lines changed: 0 additions & 1 deletion
@@ -132,7 +132,6 @@ def __init__(
         target_dtype=None,
     ) -> None:
         assert target_dtype is not None
-        factory_kwargs = {"device": device, "dtype": dtype}
         super().__init__()
         self.in_features = in_features
         self.out_features = out_features

benchmarks/gpt_fast/quantize.py

Lines changed: 0 additions & 1 deletion
@@ -93,7 +93,6 @@ def __init__(
         device=None,
         dtype=None,
     ) -> None:
-        factory_kwargs = {"device": device, "dtype": dtype}
         super().__init__()
         self.in_features = in_features
         self.out_features = out_features

benchmarks/instruction_counts/core/utils.py

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ def parse_stmts(stmts: str) -> Tuple[str, str]:
     assert len(lines) >= 3, f"Invalid string:\n{stmts}"

     column_header_pattern = r"^Python\s{35}\| C\+\+(\s*)$"
-    signature_pattern = r"^: f\((.*)\)( -> (.+))?\s*$"
+    signature_pattern = r"^: f\((.*)\)( -> (.+))?\s*$"  # noqa: F841
     separation_pattern = r"^[-]{40} | [-]{40}$"
     code_pattern = r"^(.{40}) \|($| (.*)$)"
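Here, and in framework_overhead_benchmark.py above, the assignment is kept rather than deleted and the linter is told to skip it: F841 is the flake8/ruff code for "local variable is assigned to but never used", and a trailing `# noqa: F841` suppresses exactly that check on that single line. A small sketch of the mechanism, with a made-up variable name:

    def count_fields(record: str) -> int:
        # Kept deliberately as documentation of the expected format; without the
        # trailing comment, flake8/ruff would flag this line as F841.
        header = record.split(",", 1)[0]  # noqa: F841
        return record.count(",") + 1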

benchmarks/nested/nested_bmm_bench.py

Lines changed: 2 additions & 2 deletions
@@ -6,14 +6,14 @@

 def bench(nt_a, nt_b, niter):
     # Warmup
-    nt_c = nt_a.bmm(nt_b)
+    nt_a.bmm(nt_b)

     torch.cuda.synchronize()
     start_event = torch.cuda.Event(enable_timing=True)
     end_event = torch.cuda.Event(enable_timing=True)
     start_event.record()
     for iter in range(niter):
-        nt_c = nt_a.bmm(nt_b)
+        nt_a.bmm(nt_b)
     end_event.record()
     torch.cuda.synchronize()
     runtime = (start_event.elapsed_time(end_event)) / niter

benchmarks/operator_benchmark/benchmark_core.py

Lines changed: 1 addition & 2 deletions
@@ -111,10 +111,9 @@ def _build_test(

     if tags is None:
         raise ValueError("Missing tags in configs")
-    input_config = str(test_attrs)[1:-1].replace("'", "")
+
     op = bench_op()
     assert op is not None, "Can't create test"
-    tensor_error_info = None
     # op_name_function is a dictionary which has op_name and op_function.
     # an example of op_name_function is:
     # {'op_name' : 'abs', 'op_function' : torch.abs}

benchmarks/operator_benchmark/pt/qrnn_test.py

Lines changed: 0 additions & 2 deletions
@@ -31,8 +31,6 @@ def init(self, I, H, NL, B, D, dtype):

         # The quantized.dynamic.LSTM has a bug. That's why we create a regular
         # LSTM, and quantize it later. See issue #31192.
-        scale = 1.0 / 256
-        zero_point = 0
         cell_nn = nn.LSTM(
             input_size=I,
             hidden_size=H,

benchmarks/profiler_benchmark/profiler_bench.py

Lines changed: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ def payload():
         with_stack=args.with_stack,
         use_kineto=args.use_kineto,
         use_cpu=not args.cuda_only,
-    ) as prof:
+    ):
         x = workload(input_x)
         return x

benchmarks/serialization/simple_measurement.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,14 @@ def benchmark(self):
1313
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
1414

1515
with Timer() as big2:
16-
v = torch.load("big_tensor.zip")
16+
torch.load("big_tensor.zip")
1717

1818
x = [torch.ones(10, 10) for i in range(200)]
1919
with Timer() as small1:
2020
torch.save(x, "small_tensor.zip", _use_new_zipfile_serialization=use_new)
2121

2222
with Timer() as small2:
23-
v = torch.load("small_tensor.zip")
23+
torch.load("small_tensor.zip")
2424

2525
return {
2626
"Big Tensors Save": big1.ms_duration,

benchmarks/sparse/dlmc/utils.py

Lines changed: 0 additions & 4 deletions
@@ -56,16 +56,12 @@ def load_sparse_matrix(path, device):
 def gen_vector(path, device):
     with open(path) as file:
         nrows, ncols, nnz = (int(el) for el in file.readline().split(", "))
-        index_pointers = (int(el) for el in file.readline().split())
-        indices = (int(el) for el in file.readline().split())
         return torch.randn(nrows, dtype=torch.double, device=device)


 def gen_matrix(path, device):
     with open(path) as file:
         nrows, ncols, nnz = (int(el) for el in file.readline().split(", "))
-        index_pointers = (int(el) for el in file.readline().split())
-        indices = (int(el) for el in file.readline().split())
         return torch.randn(nrows, ncols, dtype=torch.double, device=device)

benchmarks/sparse/triton_ops.py

Lines changed: 1 addition & 1 deletion
@@ -374,7 +374,7 @@ def show_best_messages(best_messages=best_messages):
     for r in range(args.repeat):
         try:
             time_ms, performance_tflops = test_func(x, y, **meta)
-        except triton.compiler.OutOfResources as msg:
+        except triton.compiler.OutOfResources:
             print(
                 f"op={op}[{meta_str}]({bsr_size},{k}x{n}) dtype={args.dtype} {sparsity=}(nnz={x._nnz()})"
                 f" blocksize={bm}x{bk} OutOfResources",

benchmarks/tensorexpr/elementwise.py

Lines changed: 0 additions & 1 deletion
@@ -208,7 +208,6 @@ def module(cls):
         return "simple_element"

     def memory_workload(self):
-        input_count = len(self.inputs)
         if self.mode == "fwd":
             sol_count = 2
             algorithmic_count = 2

benchmarks/transformer/better_transformer_vs_mha_functional.py

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ def run(
             torch.testing.assert_close(
                 y_native_mha_fast, y_native_mha_slow, atol=1e-3, rtol=1e-3
             )
-        except AssertionError as e:
+        except AssertionError:
             error_dict[entry_name] += 1
             pprint(error_dict)

benchmarks/transformer/score_mod.py

Lines changed: 0 additions & 2 deletions
@@ -98,8 +98,6 @@ def generate_inputs(

     assert q_heads % kv_heads == 0

-    num_h_groups = q_heads // kv_heads
-
     make_q = partial(
         torch.rand, q_shape, device=device, dtype=dtype, requires_grad=requires_grad
     )

benchmarks/transformer/sdp.py

Lines changed: 1 addition & 1 deletion
@@ -211,7 +211,7 @@ def run_single_experiment(config: ExperimentConfig) -> ExperimentResults:
         enable_flash=config.enable_flash,
         enable_mem_efficient=config.enable_mem_efficient,
         enable_cudnn=config.enable_cudnn,
-    ) as kernel_choice, torch.inference_mode() as inference_mode:
+    ):
         dropout_p = 0.0
         mask = None
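The profiler_bench.py and sdp.py hunks apply the same cleanup to context managers: when the object bound by `with ... as name` is never referenced, the `as` clause is dropped and only the context manager's effect is kept. A minimal sketch, assuming an arbitrary model callable:

    import torch


    def run_eval(model, x):
        # Before: `with torch.inference_mode() as mode:` bound a value that was
        # never read. After: the context still disables autograd bookkeeping,
        # just without the unused binding.
        with torch.inference_mode():
            return model(x)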

functorch/dim/delayed_mul_tensor.py

Lines changed: 0 additions & 1 deletion
@@ -62,7 +62,6 @@ def to_char(d):

         plhs, levelslhs = self._lhs._tensor, self._lhs._levels
         prhs, levelsrhs = self._rhs._tensor, self._rhs._levels
-        new_dims = tuple(d for d in self.dims if d not in dims)
         new_levels = [l for l in self._levels if l not in dims]
         fmt = "".join(
             [

functorch/dim/reference.py

Lines changed: 0 additions & 2 deletions
@@ -198,7 +198,6 @@ def unwrap(t):

     if orig in pointwise:
         result_levels = llist()
-        arg_levels = llist()
         to_expand = []
         for i, f in enumerate(flat_args):
             if isinstance(f, TensorLike):

@@ -268,7 +267,6 @@ def positional(self, *dims):
         needs_view = True

     permute = list(range(len(levels)))
-    nflat = len(flat_dims)
     for i, d in enumerate(flat_dims):
         try:
             idx = levels.index(d)
