# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. """Contains routines for printing protocol messages in text format.
  31. Simple usage example:
  32. # Create a proto object and serialize it to a text proto string.
  33. message = my_proto_pb2.MyMessage(foo='bar')
  34. text_proto = text_format.MessageToString(message)
  35. # Parse a text proto string.
  36. message = text_format.Parse(text_proto, my_proto_pb2.MyMessage())
  37. """
__author__ = 'kenton@google.com (Kenton Varda)'

import io
import re

import six

if six.PY3:
  long = int

from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
from google.protobuf import text_encoding

__all__ = ['MessageToString', 'PrintMessage', 'PrintField',
           'PrintFieldValue', 'Merge']

_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
                     type_checkers.Int32ValueChecker(),
                     type_checkers.Uint64ValueChecker(),
                     type_checkers.Int64ValueChecker())
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
                          descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
_QUOTES = frozenset(("'", '"'))


class Error(Exception):
  """Top-level module error for text_format."""


class ParseError(Error):
  """Thrown in case of text parsing error."""


class TextWriter(object):

  def __init__(self, as_utf8):
    if six.PY2:
      self._writer = io.BytesIO()
    else:
      self._writer = io.StringIO()

  def write(self, val):
    if six.PY2:
      if isinstance(val, six.text_type):
        val = val.encode('utf-8')
    return self._writer.write(val)

  def close(self):
    return self._writer.close()

  def getvalue(self):
    return self._writer.getvalue()


def MessageToString(message, as_utf8=False, as_one_line=False,
                    pointy_brackets=False, use_index_order=False,
                    float_format=None):
  """Convert protobuf message to text format.

  Floating point values can be formatted compactly with 15 digits of
  precision (which is the most that IEEE 754 "double" can guarantee)
  using float_format='.15g'. To ensure that converting to text and back to a
  proto will result in an identical value, float_format='.17g' should be used.

  Args:
    message: The protocol buffers message.
    as_utf8: Produce text output in UTF8 format.
    as_one_line: Don't introduce newlines between fields.
    pointy_brackets: If True, use angle brackets instead of curly braces for
      nesting.
    use_index_order: If True, print fields of a proto message using the order
      defined in source code instead of the field number. By default, use the
      field number order.
    float_format: If set, use this to specify floating point number formatting
      (per the "Format Specification Mini-Language"); otherwise, str() is used.

  Returns:
    A string of the text formatted protocol buffer message.
  """
  out = TextWriter(as_utf8)
  PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line,
               pointy_brackets=pointy_brackets,
               use_index_order=use_index_order,
               float_format=float_format)
  result = out.getvalue()
  out.close()
  if as_one_line:
    return result.rstrip()
  return result


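# A minimal usage sketch of the float_format option (`my_proto_pb2` and its
# double field `weight` are hypothetical); '.17g' makes the text round-trip
# back to an identical double:
#
#   msg = my_proto_pb2.MyMessage(weight=0.1)
#   MessageToString(msg, float_format='.17g')
#   # -> 'weight: 0.10000000000000001\n'

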
def _IsMapEntry(field):
  return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
          field.message_type.has_options and
          field.message_type.GetOptions().map_entry)


def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False,
                 pointy_brackets=False, use_index_order=False,
                 float_format=None):
  fields = message.ListFields()
  if use_index_order:
    fields.sort(key=lambda x: x[0].index)
  for field, value in fields:
    if _IsMapEntry(field):
      for key in sorted(value):
        # This is slow for maps with submessage entries, because it copies the
        # entire tree. Unfortunately this would take significant refactoring
        # of this file to work around.
        #
        # TODO(haberman): refactor and optimize if this becomes an issue.
        entry_submsg = field.message_type._concrete_class(
            key=key, value=value[key])
        PrintField(field, entry_submsg, out, indent, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   use_index_order=use_index_order, float_format=float_format)
    elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      for element in value:
        PrintField(field, element, out, indent, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   use_index_order=use_index_order,
                   float_format=float_format)
    else:
      PrintField(field, value, out, indent, as_utf8, as_one_line,
                 pointy_brackets=pointy_brackets,
                 use_index_order=use_index_order,
                 float_format=float_format)


def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False,
               pointy_brackets=False, use_index_order=False,
               float_format=None):
  """Print a single field name/value pair. For repeated fields, the value
  should be a single element.
  """
  out.write(' ' * indent)
  if field.is_extension:
    out.write('[')
    if (field.containing_type.GetOptions().message_set_wire_format and
        field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
        field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
      out.write(field.message_type.full_name)
    else:
      out.write(field.full_name)
    out.write(']')
  elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
    # For groups, use the capitalized name.
    out.write(field.message_type.name)
  else:
    out.write(field.name)

  if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # The colon is optional in this case, but our cross-language golden files
    # don't include it.
    out.write(': ')

  PrintFieldValue(field, value, out, indent, as_utf8, as_one_line,
                  pointy_brackets=pointy_brackets,
                  use_index_order=use_index_order,
                  float_format=float_format)
  if as_one_line:
    out.write(' ')
  else:
    out.write('\n')


def PrintFieldValue(field, value, out, indent=0, as_utf8=False,
                    as_one_line=False, pointy_brackets=False,
                    use_index_order=False,
                    float_format=None):
  """Print a single field value (not including name). For repeated fields,
  the value should be a single element."""

  if pointy_brackets:
    openb = '<'
    closeb = '>'
  else:
    openb = '{'
    closeb = '}'

  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    if as_one_line:
      out.write(' %s ' % openb)
      PrintMessage(value, out, indent, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   use_index_order=use_index_order,
                   float_format=float_format)
      out.write(closeb)
    else:
      out.write(' %s\n' % openb)
      PrintMessage(value, out, indent + 2, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   use_index_order=use_index_order,
                   float_format=float_format)
      out.write(' ' * indent + closeb)
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    enum_value = field.enum_type.values_by_number.get(value, None)
    if enum_value is not None:
      out.write(enum_value.name)
    else:
      out.write(str(value))
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    out.write('\"')
    if isinstance(value, six.text_type):
      out_value = value.encode('utf-8')
    else:
      out_value = value
    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
      # We need to escape non-UTF8 chars in TYPE_BYTES field.
      out_as_utf8 = False
    else:
      out_as_utf8 = as_utf8
    out.write(text_encoding.CEscape(out_value, out_as_utf8))
    out.write('\"')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    if value:
      out.write('true')
    else:
      out.write('false')
  elif field.cpp_type in _FLOAT_TYPES and float_format is not None:
    out.write('{1:{0}}'.format(float_format, value))
  else:
    out.write(str(value))


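# A minimal output sketch (`msg` is a hypothetical message with a nested
# submessage field `child` holding an int32 field `id`); pointy_brackets
# swaps the curly braces for angle brackets:
#
#   MessageToString(msg)                        # 'child {\n  id: 1\n}\n'
#   MessageToString(msg, pointy_brackets=True)  # 'child <\n  id: 1\n>\n'
#   MessageToString(msg, as_one_line=True)      # 'child { id: 1 }'

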
def Parse(text, message, allow_unknown_extension=False):
  """Parses a text representation of a protocol message into a message.

  Args:
    text: Message text representation.
    message: A protocol buffer message to merge into.
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On text parsing problems.
  """
  if not isinstance(text, str):
    text = text.decode('utf-8')
  return ParseLines(text.split('\n'), message, allow_unknown_extension)


def Merge(text, message, allow_unknown_extension=False):
  """Parses a text representation of a protocol message into a message.

  Like Parse(), but allows repeated values for a non-repeated field, and uses
  the last one.

  Args:
    text: Message text representation.
    message: A protocol buffer message to merge into.
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On text parsing problems.
  """
  return MergeLines(text.split('\n'), message, allow_unknown_extension)


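# A minimal sketch of the Parse/Merge distinction (`msg` is a hypothetical
# proto2 message with an optional int32 field `foo`; proto3 messages permit
# repeated scalars in both cases because presence isn't tracked):
#
#   Merge('foo: 1 foo: 2', msg)  # OK: the last value wins, msg.foo == 2
#   Parse('foo: 1 foo: 2', msg)  # raises ParseError for a proto2 message

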
def ParseLines(lines, message, allow_unknown_extension=False):
  """Parses a text representation of a protocol message into a message.

  Args:
    lines: An iterable of lines of a message's text representation.
    message: A protocol buffer message to merge into.
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On text parsing problems.
  """
  _ParseOrMerge(lines, message, False, allow_unknown_extension)
  return message


def MergeLines(lines, message, allow_unknown_extension=False):
  """Parses a text representation of a protocol message into a message.

  Args:
    lines: An iterable of lines of a message's text representation.
    message: A protocol buffer message to merge into.
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On text parsing problems.
  """
  _ParseOrMerge(lines, message, True, allow_unknown_extension)
  return message


def _ParseOrMerge(lines,
                  message,
                  allow_multiple_scalars,
                  allow_unknown_extension=False):
  """Converts a text representation of a protocol message into a message.

  Args:
    lines: Lines of a message's text representation.
    message: A protocol buffer message to merge into.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing

  Raises:
    ParseError: On text parsing problems.
  """
  tokenizer = _Tokenizer(lines)
  while not tokenizer.AtEnd():
    _MergeField(tokenizer, message, allow_multiple_scalars,
                allow_unknown_extension)


def _MergeField(tokenizer,
                message,
                allow_multiple_scalars,
                allow_unknown_extension=False):
  """Merges a single protocol message field into a message.

  Args:
    tokenizer: A tokenizer to parse the field name and values.
    message: A protocol message to record the data.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing

  Raises:
    ParseError: In case of text parsing problems.
  """
  message_descriptor = message.DESCRIPTOR
  if (hasattr(message_descriptor, 'syntax') and
      message_descriptor.syntax == 'proto3'):
    # Proto3 doesn't represent presence so we can't test if multiple
    # scalars have occurred. We have to allow them.
    allow_multiple_scalars = True
  if tokenizer.TryConsume('['):
    name = [tokenizer.ConsumeIdentifier()]
    while tokenizer.TryConsume('.'):
      name.append(tokenizer.ConsumeIdentifier())
    name = '.'.join(name)

    if not message_descriptor.is_extendable:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" does not have extensions.' %
          message_descriptor.full_name)
    # pylint: disable=protected-access
    field = message.Extensions._FindExtensionByName(name)
    # pylint: enable=protected-access
    if not field:
      if allow_unknown_extension:
        field = None
      else:
        raise tokenizer.ParseErrorPreviousToken(
            'Extension "%s" not registered.' % name)
    elif message_descriptor != field.containing_type:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" does not extend message type "%s".' % (
              name, message_descriptor.full_name))

    tokenizer.Consume(']')

  else:
    name = tokenizer.ConsumeIdentifier()
    field = message_descriptor.fields_by_name.get(name, None)

    # Group names are expected to be capitalized as they appear in the
    # .proto file, which actually matches their type names, not their field
    # names.
    if not field:
      field = message_descriptor.fields_by_name.get(name.lower(), None)
      if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
        field = None

    if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
        field.message_type.name != name):
      field = None

    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" has no field named "%s".' % (
              message_descriptor.full_name, name))

  if field and field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    is_map_entry = _IsMapEntry(field)
    tokenizer.TryConsume(':')

    if tokenizer.TryConsume('<'):
      end_token = '>'
    else:
      tokenizer.Consume('{')
      end_token = '}'

    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      if field.is_extension:
        sub_message = message.Extensions[field].add()
      elif is_map_entry:
        sub_message = field.message_type._concrete_class()
      else:
        sub_message = getattr(message, field.name).add()
    else:
      if field.is_extension:
        sub_message = message.Extensions[field]
      else:
        sub_message = getattr(message, field.name)
      sub_message.SetInParent()

    while not tokenizer.TryConsume(end_token):
      if tokenizer.AtEnd():
        raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,))
      _MergeField(tokenizer, sub_message, allow_multiple_scalars,
                  allow_unknown_extension)

    if is_map_entry:
      value_cpptype = field.message_type.fields_by_name['value'].cpp_type
      if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
        value = getattr(message, field.name)[sub_message.key]
        value.MergeFrom(sub_message.value)
      else:
        getattr(message, field.name)[sub_message.key] = sub_message.value
  elif field:
    tokenizer.Consume(':')
    if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and
        tokenizer.TryConsume('[')):
      # Short repeated format, e.g. "foo: [1, 2, 3]"
      while True:
        _MergeScalarField(tokenizer, message, field, allow_multiple_scalars)
        if tokenizer.TryConsume(']'):
          break
        tokenizer.Consume(',')
    else:
      _MergeScalarField(tokenizer, message, field, allow_multiple_scalars)
  else:  # Proto field is unknown.
    assert allow_unknown_extension
    _SkipFieldContents(tokenizer)

  # For historical reasons, fields may optionally be separated by commas or
  # semicolons.
  if not tokenizer.TryConsume(','):
    tokenizer.TryConsume(';')


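# A minimal parsing sketch (`msg` is a hypothetical message with a repeated
# int32 field `rep`); the long form and the short repeated format above both
# produce msg.rep == [1, 2, 3]:
#
#   Parse('rep: 1 rep: 2 rep: 3', msg)
#   Parse('rep: [1, 2, 3]', msg)

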
def _SkipFieldContents(tokenizer):
  """Skips over contents (value or message) of a field.

  Args:
    tokenizer: A tokenizer to parse the field name and values.
  """
  # Try to guess the type of this field.
  # If this field is not a message, there should be a ":" between the
  # field name and the field value and also the field value should not
  # start with "{" or "<" which indicates the beginning of a message body.
  # If there is no ":" or there is a "{" or "<" after ":", this field has
  # to be a message or the input is ill-formed.
  if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
      '{') and not tokenizer.LookingAt('<'):
    _SkipFieldValue(tokenizer)
  else:
    _SkipFieldMessage(tokenizer)


def _SkipField(tokenizer):
  """Skips over a complete field (name and value/message).

  Args:
    tokenizer: A tokenizer to parse the field name and values.
  """
  if tokenizer.TryConsume('['):
    # Consume extension name.
    tokenizer.ConsumeIdentifier()
    while tokenizer.TryConsume('.'):
      tokenizer.ConsumeIdentifier()
    tokenizer.Consume(']')
  else:
    tokenizer.ConsumeIdentifier()

  _SkipFieldContents(tokenizer)

  # For historical reasons, fields may optionally be separated by commas or
  # semicolons.
  if not tokenizer.TryConsume(','):
    tokenizer.TryConsume(';')


def _SkipFieldMessage(tokenizer):
  """Skips over a field message.

  Args:
    tokenizer: A tokenizer to parse the field name and values.
  """
  if tokenizer.TryConsume('<'):
    delimiter = '>'
  else:
    tokenizer.Consume('{')
    delimiter = '}'

  while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
    _SkipField(tokenizer)

  tokenizer.Consume(delimiter)


def _SkipFieldValue(tokenizer):
  """Skips over a field value.

  Args:
    tokenizer: A tokenizer to parse the field name and values.

  Raises:
    ParseError: In case an invalid field value is found.
  """
  # String tokens can come in multiple adjacent string literals.
  # If we can consume one, consume as many as we can.
  if tokenizer.TryConsumeString():
    while tokenizer.TryConsumeString():
      pass
    return

  if (not tokenizer.TryConsumeIdentifier() and
      not tokenizer.TryConsumeInt64() and
      not tokenizer.TryConsumeUint64() and
      not tokenizer.TryConsumeFloat()):
    raise ParseError('Invalid field value: ' + tokenizer.token)


def _MergeScalarField(tokenizer, message, field, allow_multiple_scalars):
  """Merges a single protocol message scalar field into a message.

  Args:
    tokenizer: A tokenizer to parse the field value.
    message: A protocol message to record the data.
    field: The descriptor of the field to be merged.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".

  Raises:
    ParseError: In case of text parsing problems.
    RuntimeError: On runtime errors.
  """
  value = None

  if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
                    descriptor.FieldDescriptor.TYPE_SINT32,
                    descriptor.FieldDescriptor.TYPE_SFIXED32):
    value = tokenizer.ConsumeInt32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
                      descriptor.FieldDescriptor.TYPE_SINT64,
                      descriptor.FieldDescriptor.TYPE_SFIXED64):
    value = tokenizer.ConsumeInt64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
                      descriptor.FieldDescriptor.TYPE_FIXED32):
    value = tokenizer.ConsumeUint32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
                      descriptor.FieldDescriptor.TYPE_FIXED64):
    value = tokenizer.ConsumeUint64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
                      descriptor.FieldDescriptor.TYPE_DOUBLE):
    value = tokenizer.ConsumeFloat()
  elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
    value = tokenizer.ConsumeBool()
  elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
    value = tokenizer.ConsumeString()
  elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
    value = tokenizer.ConsumeByteString()
  elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
    value = tokenizer.ConsumeEnum(field)
  else:
    raise RuntimeError('Unknown field type %d' % field.type)

  if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
    if field.is_extension:
      message.Extensions[field].append(value)
    else:
      getattr(message, field.name).append(value)
  else:
    if field.is_extension:
      if not allow_multiple_scalars and message.HasExtension(field):
        raise tokenizer.ParseErrorPreviousToken(
            'Message type "%s" should not have multiple "%s" extensions.' %
            (message.DESCRIPTOR.full_name, field.full_name))
      else:
        message.Extensions[field] = value
    else:
      if not allow_multiple_scalars and message.HasField(field.name):
        raise tokenizer.ParseErrorPreviousToken(
            'Message type "%s" should not have multiple "%s" fields.' %
            (message.DESCRIPTOR.full_name, field.name))
      else:
        setattr(message, field.name, value)


class _Tokenizer(object):
  """Protocol buffer text representation tokenizer.

  This class handles the lower level string parsing by splitting it into
  meaningful tokens.

  It was directly ported from the Java protocol buffer API.
  """

  _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
  _TOKEN = re.compile('|'.join([
      r'[a-zA-Z_][0-9a-zA-Z_+-]*',  # an identifier
      r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*',  # a number
  ] + [  # quoted str for each quote mark
      r'{qt}([^{qt}\n\\]|\\.)*({qt}|\\?$)'.format(qt=mark) for mark in _QUOTES
  ]))

  _IDENTIFIER = re.compile(r'\w+')

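  # For example (a sketch): the line 'foo: "bar"  # comment' tokenizes as
  # 'foo', ':', '"bar"'; whitespace and '#' comments are consumed by
  # _WHITESPACE rather than returned as tokens.
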
  def __init__(self, lines):
    self._position = 0
    self._line = -1
    self._column = 0
    self._token_start = None
    self.token = ''
    self._lines = iter(lines)
    self._current_line = ''
    self._previous_line = 0
    self._previous_column = 0
    self._more_lines = True
    self._SkipWhitespace()
    self.NextToken()

  def LookingAt(self, token):
    return self.token == token

  def AtEnd(self):
    """Checks whether the end of the text was reached.

    Returns:
      True iff the end was reached.
    """
    return not self.token

  def _PopLine(self):
    while len(self._current_line) <= self._column:
      try:
        self._current_line = next(self._lines)
      except StopIteration:
        self._current_line = ''
        self._more_lines = False
        return
      else:
        self._line += 1
        self._column = 0

  def _SkipWhitespace(self):
    while True:
      self._PopLine()
      match = self._WHITESPACE.match(self._current_line, self._column)
      if not match:
        break
      length = len(match.group(0))
      self._column += length

  def TryConsume(self, token):
    """Tries to consume a given piece of text.

    Args:
      token: Text to consume.

    Returns:
      True iff the text was consumed.
    """
    if self.token == token:
      self.NextToken()
      return True
    return False

  def Consume(self, token):
    """Consumes a piece of text.

    Args:
      token: Text to consume.

    Raises:
      ParseError: If the text couldn't be consumed.
    """
    if not self.TryConsume(token):
      raise self._ParseError('Expected "%s".' % token)

  def TryConsumeIdentifier(self):
    try:
      self.ConsumeIdentifier()
      return True
    except ParseError:
      return False

  def ConsumeIdentifier(self):
    """Consumes protocol message field identifier.

    Returns:
      Identifier string.

    Raises:
      ParseError: If an identifier couldn't be consumed.
    """
    result = self.token
    if not self._IDENTIFIER.match(result):
      raise self._ParseError('Expected identifier.')
    self.NextToken()
    return result

  def ConsumeInt32(self):
    """Consumes a signed 32bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If a signed 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=False)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeUint32(self):
    """Consumes an unsigned 32bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If an unsigned 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=False)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def TryConsumeInt64(self):
    try:
      self.ConsumeInt64()
      return True
    except ParseError:
      return False

  def ConsumeInt64(self):
    """Consumes a signed 64bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If a signed 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=True)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def TryConsumeUint64(self):
    try:
      self.ConsumeUint64()
      return True
    except ParseError:
      return False

  def ConsumeUint64(self):
    """Consumes an unsigned 64bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If an unsigned 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=True)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def TryConsumeFloat(self):
    try:
      self.ConsumeFloat()
      return True
    except ParseError:
      return False

  def ConsumeFloat(self):
    """Consumes a floating point number.

    Returns:
      The number parsed.

    Raises:
      ParseError: If a floating point number couldn't be consumed.
    """
    try:
      result = ParseFloat(self.token)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeBool(self):
    """Consumes a boolean value.

    Returns:
      The bool parsed.

    Raises:
      ParseError: If a boolean value couldn't be consumed.
    """
    try:
      result = ParseBool(self.token)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def TryConsumeString(self):
    try:
      self.ConsumeString()
      return True
    except ParseError:
      return False

  def ConsumeString(self):
    """Consumes a string value.

    Returns:
      The string parsed.

    Raises:
      ParseError: If a string value couldn't be consumed.
    """
    the_bytes = self.ConsumeByteString()
    try:
      return six.text_type(the_bytes, 'utf-8')
    except UnicodeDecodeError as e:
      raise self._StringParseError(e)

  def ConsumeByteString(self):
    """Consumes a byte array value.

    Returns:
      The array parsed (as a string).

    Raises:
      ParseError: If a byte array value couldn't be consumed.
    """
    the_list = [self._ConsumeSingleByteString()]
    while self.token and self.token[0] in _QUOTES:
      the_list.append(self._ConsumeSingleByteString())
    return b''.join(the_list)

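  # A minimal sketch: for the input `foo: "ab" 'cd'`, ConsumeByteString
  # returns b'abcd', since adjacent quoted literals concatenate as in C
  # or Python.
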
  def _ConsumeSingleByteString(self):
    """Consume one token of a string literal.

    String literals (whether bytes or text) can come in multiple adjacent
    tokens which are automatically concatenated, like in C or Python. This
    method only consumes one token.

    Returns:
      The token parsed.

    Raises:
      ParseError: When the wrong format data is found.
    """
    text = self.token
    if len(text) < 1 or text[0] not in _QUOTES:
      raise self._ParseError('Expected string but found: %r' % (text,))

    if len(text) < 2 or text[-1] != text[0]:
      raise self._ParseError('String missing ending quote: %r' % (text,))

    try:
      result = text_encoding.CUnescape(text[1:-1])
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeEnum(self, field):
    try:
      result = ParseEnum(field, self.token)
    except ValueError as e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ParseErrorPreviousToken(self, message):
    """Creates and *returns* a ParseError for the previously read token.

    Args:
      message: A message to set for the exception.

    Returns:
      A ParseError instance.
    """
    return ParseError('%d:%d : %s' % (
        self._previous_line + 1, self._previous_column + 1, message))

  def _ParseError(self, message):
    """Creates and *returns* a ParseError for the current token."""
    return ParseError('%d:%d : %s' % (
        self._line + 1, self._column + 1, message))

  def _StringParseError(self, e):
    return self._ParseError('Couldn\'t parse string: ' + str(e))

  def NextToken(self):
    """Reads the next meaningful token."""
    self._previous_line = self._line
    self._previous_column = self._column

    self._column += len(self.token)
    self._SkipWhitespace()

    if not self._more_lines:
      self.token = ''
      return

    match = self._TOKEN.match(self._current_line, self._column)
    if match:
      token = match.group(0)
      self.token = token
    else:
      self.token = self._current_line[self._column]


def ParseInteger(text, is_signed=False, is_long=False):
  """Parses an integer.

  Args:
    text: The text to parse.
    is_signed: True if a signed integer must be parsed.
    is_long: True if a long integer must be parsed.

  Returns:
    The integer value.

  Raises:
    ValueError: Thrown iff the text is not a valid integer.
  """
  # Do the actual parsing. Exception handling is propagated to caller.
  try:
    # We force 32-bit values to int and 64-bit values to long to make
    # alternate implementations where the distinction is more significant
    # (e.g. the C++ implementation) simpler.
    if is_long:
      result = long(text, 0)
    else:
      result = int(text, 0)
  except ValueError:
    raise ValueError('Couldn\'t parse integer: %s' % text)

  # Check if the integer is sane. Exceptions handled by callers.
  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
  checker.CheckValue(result)
  return result


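# A minimal sketch of the accepted forms (the base is auto-detected because
# the parse uses int(text, 0)):
#
#   ParseInteger('16')                  # ==> 16
#   ParseInteger('0x10')                # ==> 16
#   ParseInteger('-1', is_signed=True)  # ==> -1
#   ParseInteger('-1')                  # out of range for uint32; the range
#                                       # checker is expected to raise

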
def ParseFloat(text):
  """Parse a floating point number.

  Args:
    text: Text to parse.

  Returns:
    The number parsed.

  Raises:
    ValueError: If a floating point number couldn't be parsed.
  """
  try:
    # Assume Python compatible syntax.
    return float(text)
  except ValueError:
    # Check alternative spellings.
    if _FLOAT_INFINITY.match(text):
      if text[0] == '-':
        return float('-inf')
      else:
        return float('inf')
    elif _FLOAT_NAN.match(text):
      return float('nan')
    else:
      # assume '1.0f' format
      try:
        return float(text.rstrip('f'))
      except ValueError:
        raise ValueError('Couldn\'t parse float: %s' % text)


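# A minimal sketch of the accepted spellings:
#
#   ParseFloat('1.5')    # ==> 1.5
#   ParseFloat('1.5f')   # ==> 1.5 (trailing 'f' is stripped)
#   ParseFloat('-inff')  # ==> float('-inf')
#   ParseFloat('nanf')   # ==> float('nan')

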
def ParseBool(text):
  """Parse a boolean value.

  Args:
    text: Text to parse.

  Returns:
    The boolean value parsed.

  Raises:
    ValueError: If text is not a valid boolean.
  """
  if text in ('true', 't', '1'):
    return True
  elif text in ('false', 'f', '0'):
    return False
  else:
    raise ValueError('Expected "true" or "false".')


def ParseEnum(field, value):
  """Parse an enum value.

  The value can be specified by a number (the enum value), or by
  a string literal (the enum name).

  Args:
    field: Enum field descriptor.
    value: String value.

  Returns:
    Enum value number.

  Raises:
    ValueError: If the enum value could not be parsed.
  """
  enum_descriptor = field.enum_type
  try:
    number = int(value, 0)
  except ValueError:
    # Identifier.
    enum_value = enum_descriptor.values_by_name.get(value, None)
    if enum_value is None:
      raise ValueError(
          'Enum type "%s" has no value named %s.' % (
              enum_descriptor.full_name, value))
  else:
    # Numeric value.
    enum_value = enum_descriptor.values_by_number.get(number, None)
    if enum_value is None:
      raise ValueError(
          'Enum type "%s" has no value with number %d.' % (
              enum_descriptor.full_name, number))
  return enum_value.number
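

# A minimal sketch (assuming an enum field whose type defines FOO = 1):
#
#   ParseEnum(field, 'FOO')  # ==> 1 (lookup by name)
#   ParseEnum(field, '1')    # ==> 1 (lookup by number)
#   ParseEnum(field, 'BAR')  # raises ValueError if BAR is not defined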