run_and_upload.py

import argparse
import os
import re
import copy
import uuid
import calendar
import time
import big_query_utils
import datetime
import json
# This import depends on the automake rule protoc_middleman; please make sure
# protoc_middleman has been built before running this file.
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import tmp.benchmarks_pb2 as benchmarks_pb2

_PROJECT_ID = 'grpc-testing'
_DATASET = 'protobuf_benchmark_result'
_TABLE = 'opensource_result_v1'
_NOW = "%d%02d%02d" % (datetime.datetime.now().year,
                       datetime.datetime.now().month,
                       datetime.datetime.now().day)

file_size_map = {}

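# Returns (total payload size, average payload size) in bytes for the given
# dataset file, caching the result so each dataset is parsed only once.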
def get_data_size(file_name):
  if file_name in file_size_map:
    return file_size_map[file_name]
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  # Open in binary mode: the dataset file is a serialized protobuf message.
  with open(os.path.dirname(os.path.abspath(__file__)) + "/../" + file_name,
            "rb") as f:
    benchmark_dataset.ParseFromString(f.read())
  size = 0
  count = 0
  for payload in benchmark_dataset.payload:
    size += len(payload)
    count += 1
  file_size_map[file_name] = (size, 1.0 * size / count)
  return size, 1.0 * size / count

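# Extracts the short dataset name from a path, e.g.
# extract_file_name("datasets/google_message2/dataset.google_message2.pb")
# returns "google_message2".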
def extract_file_name(file_name):
  name_list = re.split(r"[/\.]", file_name)
  short_file_name = ""
  for name in name_list:
    if name[:14] == "google_message":
      short_file_name = name
  return short_file_name

cpp_result = []
python_result = []
java_result = []
go_result = []

# CPP results example:
#   {
#     "benchmarks": [
#       {
#         "bytes_per_second": int,
#         "cpu_time": int,
#         "name": string,
#         "time_unit": string,
#         ...
#       },
#       ...
#     ],
#     ...
#   }

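# Appends one row per C++ benchmark to cpp_result. "bytes_per_second" is
# reported by the benchmark itself, so throughput only needs a conversion
# from bytes/s to MiB/s (division by 2**20).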
def parse_cpp_result(filename):
  global cpp_result
  if filename == "":
    return
  if filename[0] != '/':
    filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
  with open(filename) as f:
    results = json.loads(f.read())
  for benchmark in results["benchmarks"]:
    data_filename = re.split("(_parse_|_serialize)", benchmark["name"])[0]
    behavior = benchmark["name"][len(data_filename) + 1:]
    cpp_result.append({
        "language": "cpp",
        "dataFileName": data_filename,
        "behavior": behavior,
        "throughput": benchmark["bytes_per_second"] / 2.0 ** 20
    })

# Python results example:
#   [
#     [
#       {
#         "filename": string,
#         "benchmarks": {
#           behavior: results,
#           ...
#         },
#         "message_name": string
#       },
#       ...
#     ],  # pure-python
#     ...
#   ]

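# Appends one row per (dataset, behavior) pair to python_result. The reported
# value is interpreted as nanoseconds per operation, so throughput in MiB/s
# is average message size / time per op * 1e9 / 2**20.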
def parse_python_result(filename):
  global python_result
  if filename == "":
    return
  if filename[0] != '/':
    filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
  with open(filename) as f:
    results_list = json.loads(f.read())
  for results in results_list:
    for result in results:
      _, avg_size = get_data_size(result["filename"])
      for behavior in result["benchmarks"]:
        python_result.append({
            "language": "python",
            "dataFileName": extract_file_name(result["filename"]),
            "behavior": behavior,
            "throughput": avg_size /
                          result["benchmarks"][behavior] * 1e9 / 2 ** 20
        })

# Java results example:
#   [
#     {
#       "id": string,
#       "instrumentSpec": {...},
#       "measurements": [
#         {
#           "weight": float,
#           "value": {
#             "magnitude": float,
#             "unit": string
#           },
#           ...
#         },
#         ...
#       ],
#       "run": {...},
#       "scenario": {
#         "benchmarkSpec": {
#           "methodName": string,
#           "parameters": {
#             <benchmark parameter name>: <parameter value>
#           },
#           ...
#         },
#         ...
#       }
#     },
#     ...
#   ]

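# Appends one row per Java (Caliper) benchmark to java_result. The sum of
# measurement magnitudes divided by the sum of weights is treated as the
# average time per operation in nanoseconds.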
def parse_java_result(filename):
  global java_result
  if filename == "":
    return
  if filename[0] != '/':
    filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
  with open(filename) as f:
    results = json.loads(f.read())
  for result in results:
    total_weight = 0
    total_value = 0
    for measurement in result["measurements"]:
      total_weight += measurement["weight"]
      total_value += measurement["value"]["magnitude"]
    avg_time = total_value * 1.0 / total_weight
    total_size, _ = get_data_size(
        result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
    java_result.append({
        "language": "java",
        "throughput": total_size / avg_time * 1e9 / 2 ** 20,
        "behavior": result["scenario"]["benchmarkSpec"]["methodName"],
        "dataFileName": extract_file_name(
            result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
    })

# Go benchmark results:
#
# goos: linux
# goarch: amd64
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Unmarshal-12   3000    705784 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Marshal-12     2000    634648 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Size-12        5000    244174 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Clone-12        300   4120954 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Merge-12        300   4108632 ns/op
# PASS
# ok  _/usr/local/google/home/yilunchong/mygit/protobuf/benchmarks 124.173s

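# Parses the plain-text output of `go test -bench`. Only lines starting with
# "Benchmark" are data rows; the third column is the time per operation in
# ns/op, and the "-12" style suffix on the behavior name (the GOMAXPROCS
# value) is stripped.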
def parse_go_result(filename):
  global go_result
  if filename == "":
    return
  if filename[0] != '/':
    filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
  with open(filename) as f:
    for line in f:
      result_list = re.split(r"[ \t]+", line)
      if result_list[0][:9] != "Benchmark":
        continue
      first_slash_index = result_list[0].find('/')
      last_slash_index = result_list[0].rfind('/')
      # Strip the leading "/../" prefix.
      full_filename = result_list[0][first_slash_index + 4:last_slash_index]
      total_bytes, _ = get_data_size(full_filename)
      behavior_with_suffix = result_list[0][last_slash_index + 1:]
      last_dash = behavior_with_suffix.rfind("-")
      if last_dash == -1:
        behavior = behavior_with_suffix
      else:
        behavior = behavior_with_suffix[:last_dash]
      go_result.append({
          "dataFileName": extract_file_name(full_filename),
          "throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20,
          "behavior": behavior,
          "language": "go"
      })

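# Collects run metadata from the Jenkins environment. ghprbActualCommit is
# set by the GitHub Pull Request Builder plugin to the head commit of the
# pull request under test.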
def get_metadata():
  build_number = os.getenv('BUILD_NUMBER')
  build_url = os.getenv('BUILD_URL')
  job_name = os.getenv('JOB_NAME')
  git_commit = os.getenv('GIT_COMMIT')
  # The actual commit is the head of the PR that is getting tested.
  git_actual_commit = os.getenv('ghprbActualCommit')

  utc_timestamp = str(calendar.timegm(time.gmtime()))
  metadata = {'created': utc_timestamp}

  if build_number:
    metadata['buildNumber'] = build_number
  if build_url:
    metadata['buildUrl'] = build_url
  if job_name:
    metadata['jobName'] = job_name
  if git_commit:
    metadata['gitCommit'] = git_commit
  if git_actual_commit:
    metadata['gitActualCommit'] = git_actual_commit

  return metadata

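# Uploads each result as one row of the BigQuery table. The "$YYYYMMDD"
# suffix on the table name is BigQuery's partition decorator, so rows land
# in the partition for the current date.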
def upload_result(result_list, metadata):
  for result in result_list:
    new_result = copy.deepcopy(result)
    new_result['metadata'] = metadata
    bq = big_query_utils.create_big_query()
    row = big_query_utils.make_row(str(uuid.uuid4()), new_result)
    if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET,
                                       _TABLE + "$" + _NOW,
                                       [row]):
      print('Error when uploading result', new_result)

if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument("-cpp", "--cpp_input_file",
                      help="The CPP benchmark result file's name",
                      default="")
  parser.add_argument("-java", "--java_input_file",
                      help="The Java benchmark result file's name",
                      default="")
  parser.add_argument("-python", "--python_input_file",
                      help="The Python benchmark result file's name",
                      default="")
  parser.add_argument("-go", "--go_input_file",
                      help="The Go benchmark result file's name",
                      default="")
  args = parser.parse_args()

  parse_cpp_result(args.cpp_input_file)
  parse_python_result(args.python_input_file)
  parse_java_result(args.java_input_file)
  parse_go_result(args.go_input_file)

  metadata = get_metadata()
  print("uploading cpp results...")
  upload_result(cpp_result, metadata)
  print("uploading java results...")
  upload_result(java_result, metadata)
  print("uploading python results...")
  upload_result(python_result, metadata)
  print("uploading go results...")
  upload_result(go_result, metadata)
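
# Example invocation (file names are illustrative; any flag may be omitted,
# in which case that language's results are skipped):
#   python run_and_upload.py -cpp cpp_result.json -java java_result.json \
#       -python python_result.json -go go_result.txt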