py_benchmark.py

import sys
import os
import timeit
import math
import fnmatch

# C++ generated code must be linked before importing the generated Python code
# so that the descriptors can be found in the descriptor pool.
if len(sys.argv) < 2:
  raise IOError("Need string argument \"true\" or \"false\" for whether to use cpp generated code")
if sys.argv[1] == "true":
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/.libs")
  import libbenchmark_messages
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/tmp")
elif sys.argv[1] != "false":
  raise IOError("Need string argument \"true\" or \"false\" for whether to use cpp generated code")

import datasets.google_message1.benchmark_message1_proto2_pb2 as benchmark_message1_proto2_pb2
import datasets.google_message1.benchmark_message1_proto3_pb2 as benchmark_message1_proto3_pb2
import datasets.google_message2.benchmark_message2_pb2 as benchmark_message2_pb2
import datasets.google_message3.benchmark_message3_pb2 as benchmark_message3_pb2
import datasets.google_message4.benchmark_message4_pb2 as benchmark_message4_pb2
import benchmarks_pb2
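
# Sketch of an invocation, assuming the usual benchmarks directory layout
# (the dataset filename below is illustrative, not a guaranteed path):
#   python py_benchmark.py false dataset.google_message1_proto2.pb [more .pb files...]
# The first argument selects whether the C++-backed generated code is used;
# each remaining argument is a serialized BenchmarkDataset file, resolved
# relative to the parent of this script's directory.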
def run_one_test(filename):
  # Read the serialized BenchmarkDataset in binary mode.
  data = open(os.path.dirname(sys.argv[0]) + "/../" + filename, "rb").read()
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  benchmark_dataset.ParseFromString(data)
  benchmark_util = Benchmark(full_iteration=len(benchmark_dataset.payload),
                             module="py_benchmark",
                             setup_method="init")
  print("Message %s of dataset file %s" %
        (benchmark_dataset.message_name, filename))
  benchmark_util.set_test_method("parse_from_benchmark")
  print(benchmark_util.run_benchmark(setup_method_args='"%s"' % (filename)))
  benchmark_util.set_test_method("serialize_to_benchmark")
  print(benchmark_util.run_benchmark(setup_method_args='"%s"' % (filename)))
  print("")


# init(), parse_from_benchmark() and serialize_to_benchmark() are re-imported
# by timeit from this module (see Benchmark.full_setup_code), so the state they
# share has to live in module-level globals.
def init(filename):
  global benchmark_dataset, message_class, message_list, counter
  message_list = []
  counter = 0
  data = open(os.path.dirname(sys.argv[0]) + "/../" + filename, "rb").read()
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  benchmark_dataset.ParseFromString(data)
  if benchmark_dataset.message_name == "benchmarks.proto3.GoogleMessage1":
    message_class = benchmark_message1_proto3_pb2.GoogleMessage1
  elif benchmark_dataset.message_name == "benchmarks.proto2.GoogleMessage1":
    message_class = benchmark_message1_proto2_pb2.GoogleMessage1
  elif benchmark_dataset.message_name == "benchmarks.proto2.GoogleMessage2":
    message_class = benchmark_message2_pb2.GoogleMessage2
  elif benchmark_dataset.message_name == "benchmarks.google_message3.GoogleMessage3":
    message_class = benchmark_message3_pb2.GoogleMessage3
  elif benchmark_dataset.message_name == "benchmarks.google_message4.GoogleMessage4":
    message_class = benchmark_message4_pb2.GoogleMessage4
  else:
    raise IOError("Message %s not found!" % (benchmark_dataset.message_name))
  # Pre-parse every payload so serialize_to_benchmark measures serialization only.
  for one_payload in benchmark_dataset.payload:
    temp = message_class()
    temp.ParseFromString(one_payload)
    message_list.append(temp)


def parse_from_benchmark():
  global counter, message_class, benchmark_dataset
  # Cycle through the payloads so successive reps parse different messages.
  m = message_class().ParseFromString(
      benchmark_dataset.payload[counter % len(benchmark_dataset.payload)])
  counter = counter + 1


def serialize_to_benchmark():
  global counter, message_list, message_class
  s = message_list[counter % len(benchmark_dataset.payload)].SerializeToString()
  counter = counter + 1


class Benchmark:
  def __init__(self, module=None, test_method=None,
               setup_method=None, full_iteration=1):
    self.full_iteration = full_iteration
    self.module = module
    self.test_method = test_method
    self.setup_method = setup_method

  def set_test_method(self, test_method):
    self.test_method = test_method

  def full_setup_code(self, setup_method_args=''):
    setup_code = ""
    setup_code += "from %s import %s\n" % (self.module, self.test_method)
    setup_code += "from %s import %s\n" % (self.module, self.setup_method)
    setup_code += "%s(%s)\n" % (self.setup_method, setup_method_args)
    return setup_code
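
  # For example, with the defaults used in run_one_test, full_setup_code()
  # hands timeit a setup string like the following (filename illustrative):
  #   from py_benchmark import parse_from_benchmark
  #   from py_benchmark import init
  #   init("dataset.google_message1_proto2.pb")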

  def dry_run(self, test_method_args='', setup_method_args=''):
    return timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                         setup=self.full_setup_code(setup_method_args),
                         number=self.full_iteration)

  def run_benchmark(self, test_method_args='', setup_method_args=''):
    reps = self.full_iteration
    t = self.dry_run(test_method_args, setup_method_args)
    if t < 3:
      # Scale the repetition count so the timed run lasts at least ~3 seconds,
      # then time it again with the larger count.
      reps = int(math.ceil(3 / t)) * self.full_iteration
      t = timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                        setup=self.full_setup_code(setup_method_args),
                        number=reps)
    return "Average time for %s: %.2f ns" % \
        (self.test_method, 1.0 * t / reps * (10 ** 9))


if __name__ == "__main__":
  for i in range(2, len(sys.argv)):
    run_one_test(sys.argv[i])
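
# Expected output shape per dataset file (the numbers here are illustrative):
#   Message benchmarks.proto2.GoogleMessage1 of dataset file dataset.google_message1_proto2.pb
#   Average time for parse_from_benchmark: 12345.67 ns
#   Average time for serialize_to_benchmark: 9876.54 ns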