
Commit 2fd7566

cyyever authored and pytorchmergebot committed

[Caffe2] Remove Caffe2 scripts and benchmarks (pytorch#126747)

Due to the removal of Caffe2.

Pull Request resolved: pytorch#126747
Approved by: https://github.com/ezyang, https://github.com/malfet
1 parent e98662b commit 2fd7566

23 files changed: +29 lines, −1674 lines

benchmarks/framework_overhead_benchmark/C2Module.py

Lines changed: 0 additions & 45 deletions
This file was deleted.

benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py

Lines changed: 11 additions & 39 deletions
````diff
@@ -1,6 +1,5 @@
 import argparse
 
-from C2Module import C2SimpleNet
 from pt_wrapper_module import WrapperModule
 
 from SimpleAddModule import add_tensors_loop, SimpleAddModule
@@ -19,9 +18,6 @@
 --add-op --graph-mode --eager-mode (Runs both graph mode and eager mode)
 buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
 --add-op --graph-mode (Runs only graph mode)
-To run C2 benchmark:
-buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
---add-op --benchmark-c2-net
 """
 
 SUPPORTED_OPS = {"add_op"}
@@ -49,39 +45,22 @@ def benchmark_simple_fn(args, config, module_config, module_type, result):
     module_type: Type of the module to be wrapped. e.g. SimpleAddModule for add op.
     result: dictionary instance to be populated with the benchmark result (latency per iter).
     """
-    benchmark_c2_net = args.benchmark_c2_net
     print(f"Benchmarking {module_type.__name__}")
-    if benchmark_c2_net:
-        op_name = module_config.c2_op
-        num_inputs = module_config.num_params
-        module = C2SimpleNet(op_name, num_inputs=num_inputs, debug=args.debug)
-        latency_per_iter_ms = benchmark_module(config, module)
-        result[op_name] = latency_per_iter_ms
-    else:
-        f_name = (
-            module_config.pt_fn.__name__
-            + ":Num Operands="
-            + str(module_config.num_params)
-        )
-        graph_mode_str = "Graph mode" + ":" + str(module_config.graph_mode)
-        result_key = ",".join((f_name, graph_mode_str))
-        module = WrapperModule(module_type, module_config, args.debug, args.save)
-        latency_per_iter_ms = benchmark_module(
-            config, module, args.use_throughput_benchmark
-        )
-        result[result_key] = latency_per_iter_ms
+    f_name = (
+        module_config.pt_fn.__name__ + ":Num Operands=" + str(module_config.num_params)
+    )
+    graph_mode_str = "Graph mode" + ":" + str(module_config.graph_mode)
+    result_key = ",".join((f_name, graph_mode_str))
+    module = WrapperModule(module_type, module_config, args.debug, args.save)
+    latency_per_iter_ms = benchmark_module(
+        config, module, args.use_throughput_benchmark
+    )
+    result[result_key] = latency_per_iter_ms
 
 
 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("--op", default="add_op", dest="op", type=str)
-    parser.add_argument(
-        "--benchmark-c2-net",
-        "--benchmark_c2_net",
-        default=False,
-        dest="benchmark_c2_net",
-        action="store_true",
-    )
     parser.add_argument(
         "--use-throughput-benchmark",
         "--use_throughput_benchmark",
@@ -107,10 +86,6 @@ def main():
     if args.op not in SUPPORTED_OPS:
         print(f"Op {args.op} is not supported: Supported ops are:{SUPPORTED_OPS}")
         return
-    assert not (
-        args.benchmark_c2_net and args.use_throughput_benchmark
-    ), "Benchmarking of C2 net via throughput benchmarking is not yet supported"
-
     num_warmup_iters = args.num_warmup_iters
     num_iters = args.num_iters
     config = BenchmarkConfig(num_warmup_iters, num_iters)
@@ -120,10 +95,7 @@ def main():
     result = {}
     if args.op == "add_op":
         num_params = 2
-        if args.benchmark_c2_net:
-            module_config = ModuleConfig(None, "Sum", num_params, None)
-        else:
-            module_config = ModuleConfig(add_tensors_loop, None, num_params, graph_mode)
+        module_config = ModuleConfig(add_tensors_loop, None, num_params, graph_mode)
         benchmark_simple_fn(args, config, module_config, SimpleAddModule, result)
     print_results(result)
 
````
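
With the Caffe2 branch gone, `benchmark_simple_fn` always takes the PyTorch path. As a hedged illustration (not part of the commit), the surviving result-key construction can be reproduced standalone; `make_result_key` is a hypothetical helper and `add_tensors_loop` here is a stand-in for the real function:

```python
# Hedged sketch: the result key built by the simplified benchmark_simple_fn,
# reproduced from the diff's added lines for illustration only.
def make_result_key(pt_fn, num_params, graph_mode):
    f_name = pt_fn.__name__ + ":Num Operands=" + str(num_params)
    graph_mode_str = "Graph mode" + ":" + str(graph_mode)
    return ",".join((f_name, graph_mode_str))

def add_tensors_loop(*tensors):  # stand-in for the real pt_fn
    return sum(tensors)

print(make_result_key(add_tensors_loop, 2, True))
# prints: add_tensors_loop:Num Operands=2,Graph mode:True
```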

benchmarks/operator_benchmark/README.md

Lines changed: 6 additions & 89 deletions
````diff
@@ -1,14 +1,14 @@
-# PyTorch/Caffe2 Operator Micro-benchmarks
+# PyTorch Operator Micro-benchmarks
 
-This benchmark suite provides a systemic way to measure the performance of operators for a wide range of inputs. The generated benchmark data fully characterized the performance of an operator in terms of execution time and the efficiency of the PyTorch/Caffe2 frameworks used.
+This benchmark suite provides a systemic way to measure the performance of operators for a wide range of inputs. The generated benchmark data fully characterized the performance of an operator in terms of execution time and the efficiency of the PyTorch frameworks used.
 
 ## Features
 
 Key Features:
 
 1\. Language used: Python
 
-2\. Supported Frameworks: PyTorch and Caffe2
+2\. Supported Frameworks: PyTorch
 
 3\. Supported PyTorch mode: eager and JIT
 
````
````diff
@@ -49,7 +49,7 @@ python -m benchmark_all_test
 ```
 
 ## Code to support `torch.add` in the benchmark
-The following example shows the code to support `torch.add` with 27 different tests. In the subpages of this wiki, we'll step through the complete flow of adding PyTorch and Caffe2 operators to the benchmark suite. Existing benchmarks for operators are in `pt` and `c2` directories and we highly recommend putting your new operators in those locations.
+The following example shows the code to support `torch.add` with 27 different tests. In the subpages of this wiki, we'll step through the complete flow of adding PyTorch operators to the benchmark suite. Existing benchmarks for operators are in the `pt` directory and we highly recommend putting your new operators in those locations.
 
 ```python
 add_short_configs = op_bench.cross_product_configs(
````
````diff
@@ -77,7 +77,7 @@ op_bench.generate_pt_test(add_short_configs, AddBenchmark)
 The output is intended to be a human readable format. Here is an example output for `torch.add`:
 ```
 # ----------------------------------------
-# PyTorch/Caffe2 Operator Micro-benchmarks
+# PyTorch Operator Micro-benchmarks
 # ----------------------------------------
 # Tag : short
````
````diff
@@ -146,7 +146,7 @@ python -m pt.add_test --tag-filter long
 ```
 
 ## Adding New Operators to the Benchmark Suite
-In the previous sections, we gave several examples to show how to run the already available operators in the benchmark suite. In the following sections, we'll step through the complete flow of adding PyTorch and Caffe2 operators to the benchmark suite. Existing benchmarks for operators are in `pt` and `c2` directories and we highly recommend putting your new operators in those directories as well.
+In the previous sections, we gave several examples to show how to run the already available operators in the benchmark suite. In the following sections, we'll step through the complete flow of adding PyTorch operators to the benchmark suite. Existing benchmarks for operators are in the `pt` directory and we highly recommend putting your new operators in those directories as well.
 
 ### Add a New PyTorch Operator
 Let's say you want to measure the execution time of the following operator:
````
````diff
@@ -260,55 +260,6 @@ if __name__ == "__main__":
 ```
 That's it. You just added a new operator to the benchmark suite!
 
-
-### Add a New Caffe2 Operator
-The steps to add a new Caffe2 operator is the same as that for a PyTorch operator. The code below shows how to add Caffe2 `Add` operator:
-```python
-import operator_benchmark as op_bench
-from caffe2.python import core
-
-add_long_configs = op_bench.cross_product_configs(
-    M=[8, 64, 128],
-    N=range(2, 10, 3),
-    K=[2 ** x for x in range(0, 3)],
-    tags=["long"]
-)
-
-add_short_configs = op_bench.config_list(
-    attrs=[
-        [8, 16, 32],
-        [16, 16, 64],
-        [64, 64, 128],
-    ],
-    attr_names=["M", "N", "K"],
-    tags=["short"],
-)
-
-class AddBenchmark(op_bench.Caffe2BenchmarkBase):
-
-    def init(self, M, N, K):
-        self.input_one = self.tensor(M, N, K)
-        self.input_two = self.tensor(M, N, K)
-        self.output = self.tensor(M, N, K)
-        self.set_module_name("add")
-
-    def forward(self):
-        op = core.CreateOperator(
-            "Add", [self.input_one, self.input_two], self.output, **self.args
-        )
-
-        return op
-
-op_bench.generate_c2_test(add_long_configs + add_short_configs, AddBenchmark)
-
-if __name__ == "__main__":
-    op_bench.benchmark_runner.main()
-```
-There are two things worth mentioning in this code:
-* `self.tensor` is a helper function which takes shapes and returns a Caffe2 blob. It is designed to make the tensor creation step easier compared to the standard Caffe2 way.
-* `generate_c2_test` is used to register Caffe2 tests with the benchmark.
-
-
 ### Add a List of Operators
 In the previous sections, we introduced the steps required to add a single operator to the benchmark suite. There are scenarios where you want to extend the benchmark suite with a list of operators which can share the same inputs. For example, to benchmark `abs` and `acos` operators, you can use the same set of inputs for both.
 
````
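
For reference, the removed Caffe2 `Add` example maps directly onto the PyTorch flow that this README keeps. Below is a minimal hedged sketch of the equivalent PyTorch benchmark, modeled on the README's own `torch.add` example; the `self.inputs` dict and per-input `forward` arguments are assumptions about the current `op_bench` API:

```python
# Hedged sketch, not part of the commit: PyTorch-side counterpart of the
# removed Caffe2 `Add` benchmark, following the torch.add example that
# remains in this README.
import operator_benchmark as op_bench
import torch

add_short_configs = op_bench.config_list(
    attrs=[
        [8, 16, 32],
        [16, 16, 64],
        [64, 64, 128],
    ],
    attr_names=["M", "N", "K"],
    tags=["short"],
)

class AddBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K):
        self.inputs = {
            "input_one": torch.rand(M, N, K),
            "input_two": torch.rand(M, N, K),
        }
        self.set_module_name("add")

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)

op_bench.generate_pt_test(add_short_configs, AddBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
```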

````diff
@@ -416,37 +367,3 @@ The example below shows the relevant code for that:
     self.input_one = torch.rand(M, N, K, requires_grad=True)
 generate_pt_gradient_test(long_configs + short_configs, TorchAddBenchmark)
 ```
-#### For Caffe2 Gradient Ops
-To add Caffe2 gradient ops, we need to implement a new backward method in the benchmark class:
-```python
-class AddBenchmark(op_bench.Caffe2BenchmarkBase):
-
-    def init(self, M, N, K):
-        self.input_one = self.tensor(M, N, K)
-        self.input_two = self.tensor(M, N, K)
-        self.input_one_grad = self.tensor(M, N, K)
-        self.input_two_grad = self.tensor(M, N, K)
-        self.output = self.tensor(M, N, K)
-        self.set_module_name("add")
-
-    def forward(self):
-        op = core.CreateOperator(
-            "Add", [self.input_one, self.input_two], self.output, **self.args
-        )
-
-        return op
-
-    def backward(self):
-        grad_op = core.CreateOperator(
-            "AddGradient",
-            [self.output, self.input_one, self.input_two],
-            [self.input_one_grad, self.input_two_grad], **self.args
-        )
-
-        return grad_op
-
-op_bench.generate_c2_gradient_test(long_configs + short_configs,AddBenchmark)
-```
-After the class is implemented, we need to register the tests with `generate_c2_gradient_test` function.
-
-This concludes the overview of the operator benchmark suite.
````
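
The PyTorch gradient path referenced in the context lines above (`requires_grad=True` inputs plus `generate_pt_gradient_test`) is what remains after this removal. A minimal hedged sketch, under the same API assumptions as the previous example:

```python
# Hedged sketch, not from the commit: the PyTorch gradient-benchmark
# counterpart to the removed Caffe2 backward() example. API details
# (TorchBenchmarkBase, self.inputs, generate_pt_gradient_test) are
# assumptions based on the README's surrounding context lines.
import operator_benchmark as op_bench
import torch

short_configs = op_bench.cross_product_configs(
    M=[8, 16], N=[32, 64], K=[64, 128], tags=["short"]
)

class TorchAddBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K):
        # requires_grad=True so backward timing exercises the gradient op
        self.inputs = {
            "input_one": torch.rand(M, N, K, requires_grad=True),
            "input_two": torch.rand(M, N, K, requires_grad=True),
        }
        self.set_module_name("add")

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)

op_bench.generate_pt_gradient_test(short_configs, TorchAddBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
```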
