
Implement automatic document generation for Network Traffic Annotations

This generator uses grouping.xml and annotations.tsv to generate a
Google Docs document for sys-admin clients. Refer to the README for
additional guidance.

Bug: 1107860
Change-Id: Ia9350fdc049e0e4b0ac2830968d9e60309bc3c27
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2283487
Commit-Queue: Mohamadou Bella Bah <bellabah@chromium.org>
Reviewed-by: Nicolas Ouellet-Payeur <nicolaso@chromium.org>
Reviewed-by: Ramin Halavati <rhalavati@chromium.org>
Cr-Commit-Position: refs/heads/master@{#795068}
10 changed files with 1302 additions and 34 deletions

@@ -289,7 +289,7 @@ change list. These checks include:
`tools/traffic_annotation/summary/grouping.xml`. When adding a new annotation,
it must also be included in `grouping.xml` for reporting purposes (please
refer to the **Annotations Review**).
### Presubmit tests
To perform tests prior to submitting, one can use the `traffic_annotation_auditor`
@@ -324,7 +324,7 @@ one is updated, or deleted, this file should also be updated. To update the
as specified in presubmit tests. But if it is not possible to do so (e.g., if
you are changing the code from an unsupported platform or you don't have a
compiled build directory), the code can be submitted to the trybot and the test
on trybot will tell you the required modifications.
To make external reporting easier, annotation unique ids should be
mentioned in `tools/traffic_annotation/summary/grouping.xml`. Once a new

@@ -29,3 +29,25 @@ annotations in code. It uses regular expressions on source files.
# extractor_test.py
Unit tests for extractor.py.
# update_annotations_doc.py
Updates the Chrome Browser Network Traffic Annotations document that presents
all network traffic annotations specified within `summary/grouping.xml`.
- You can use the `hidden="true"` attribute within a group to suppress the
group and its nested senders and annotations from appearing in the document.
- You can use the `hidden="true"` attribute within the annotations in
`grouping.xml` to suppress them from appearing in the document.
- `grouping.xml` needn't be organized in alphabetical order; the script
sorts groups and senders alphabetically on its own (see the example below).
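For illustration, a minimal `grouping.xml` using the `hidden` attribute might
look like this (all group, sender, and annotation names here are hypothetical):
```xml
<groups>
  <!-- A hidden group suppresses itself and everything nested inside it. -->
  <group name="Internal Group" hidden="true">
    <sender name="Internal Sender">
      <traffic_annotation unique_id="internal_annotation"/>
    </sender>
  </group>
  <group name="Example Group">
    <sender name="Example Sender">
      <!-- A hidden annotation is dropped; the rest of the group still appears. -->
      <traffic_annotation unique_id="hidden_annotation" hidden="true"/>
      <traffic_annotation unique_id="visible_annotation"/>
    </sender>
  </group>
</groups>
```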
# update_annotations_doc_tests.py
Unit tests for update_annotations_doc.py.
# parser.py
Parses the `grouping.xml` and `annotations.tsv` files to provide
`update_annotations_doc.py` with the annotations and their relevant information,
e.g. unique_id, data, trigger, etc. Also includes methods to parse the JSON
object returned by the Google Docs API `get()` method.
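A minimal usage sketch of the parser pipeline, mirroring what
`update_annotations_doc.py` does internally (file paths are illustrative):
```python
import parser

# Load annotations.tsv and build the unique_id -> TrafficAnnotation mapping.
tsv_contents = parser.load_tsv_file("annotations.tsv", verbose=False)
annotations_mapping = parser.map_annotations(tsv_contents)

# Parse grouping.xml and emit placeholders in their order of appearance.
xml_parser = parser.XMLParser("grouping.xml", annotations_mapping)
placeholders = xml_parser.build_placeholders()
```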
# parser_tests.py
Unit tests for parser.py.

@@ -0,0 +1,350 @@
#!/usr/bin/env vpython
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains the parsers for .tsv and .xml files, annotations.tsv and
grouping.xml respectively. Also includes methods to parse the json object
returned by the Google Doc API's .get() method.
These parsers are used to populate the duplicated Google Doc template with
several placeholders, and, to populate the traffic annotations with their
relevant attributes, e.g. description, policy, etc.
"""
from __future__ import print_function
from collections import namedtuple
from collections import OrderedDict
import xml.etree.ElementTree
import enum
import json
import csv
import sys
import io
import re
TrafficAnnotation = namedtuple(
"TrafficAnnotation",
["unique_id", "description", "trigger", "data", "settings", "policy"])
class Placeholder(str, enum.Enum):
GROUP = "group"
SENDER = "sender"
ANNOTATION = "annotation"
ANNOTATION_BOLD = "annotation_bold"
PLACEHOLDER_STYLES = {
Placeholder.GROUP: {
"bold": False,
"font": "Roboto",
"fontSize": 20,
"namedStyleType": "HEADING_1"
},
Placeholder.SENDER: {
"bold": True,
"font": "Roboto",
"fontSize": 14,
"namedStyleType": "HEADING_2"
},
Placeholder.ANNOTATION: {
"bold": False,
"font": "Roboto",
"fontSize": 9
},
Placeholder.ANNOTATION_BOLD: {
"bold": True,
"font": "Roboto",
"fontSize": 9
}
}
def utf_8_encoder(input_file):
for line in input_file:
yield line.encode("utf-8")
def load_tsv_file(file_path, verbose):
""" Loads annotations TSV file.
Args:
file_path: str
Path to the TSV file.
verbose: bool
Whether to print messages about ignored rows.
Returns:
list of list Table of loaded annotations.
"""
rows = []
with io.open(file_path, mode="r", encoding="utf-8") as csvfile:
# CSV library does not support unicode, so encoding to utf-8 and back.
reader = csv.reader(utf_8_encoder(csvfile), delimiter="\t")
for row in reader:
row = [unicode(col, "utf-8") for col in row]
# If the last column of the file_row is empty, the row belongs to a
# platform different from the one that TSV file is generated on, hence it
# should be ignored.
if row[-1]:
rows.append(row)
elif verbose:
print("Ignored from other platforms: %s" % row[0])
return rows
def map_annotations(tsv_contents):
"""Creates a mapping between the unique_id of a given annotation and its
relevant attributes, e.g. description, trigger, data, etc.
Args:
tsv_contents: List[List]
Table of loaded annotations.
Returns:
unique_id_rel_attributes_map: <Dict[str, TrafficAnnotation]>
"""
unique_id_rel_attributes_map = {}
for annotation_row in tsv_contents:
unique_id = annotation_row[0].encode("utf-8")
description = annotation_row[3].encode("utf-8")
trigger = annotation_row[4].encode("utf-8")
data = annotation_row[5].encode("utf-8")
settings = annotation_row[9].encode("utf-8")
policy = annotation_row[10].encode("utf-8")
payload = [unique_id, description, trigger, data, settings, policy]
unique_id_rel_attributes_map[unique_id] = TrafficAnnotation._make(payload)
return unique_id_rel_attributes_map
class XMLParser:
"""Parses grouping.xml with the aim of generating the placeholders list"""
def __init__(self, file_path, annotations_mapping):
"""
Args:
file_path: str
The file path to the xml to parse. Ostensibly, grouping.xml located
within traffic_annotation/summary.
annotations_mapping: Dict[str, dict]
The mapping between a given annotation's unique_id and its relevant
attributes, e.g. description, policy, data, etc.
"""
self.parsed_xml = {}
self.annotations_mapping = annotations_mapping
self.parse_xml(file_path)
def parse_xml(self, file_path):
"""Parses the grouping.xml file and populates self.parsed_xml.
self.parsed_xml: <{Group1: {sender: [traffic_annotations]}, ...}>
"""
tree = xml.etree.ElementTree.parse(file_path)
root = tree.getroot()
for group in root.iter("group"):
assert group.tag == "group"
group_name = group.attrib["name"]
# Suppress if hidden="true" in the group block. Will not include any of
# the senders and annotations in the block.
if group.attrib.get("hidden", "") == "true":
continue
self.parsed_xml[group_name] = {}
for sender in group.iter("sender"):
sender_name = sender.attrib["name"]
# Suppress if hidden="true" (or hidden is even mentioned) in the given
# annotation, don't include in traffic_annotations.
traffic_annotations = sorted([
t_annotation.attrib["unique_id"]
for t_annotation in sender.iter("traffic_annotation")
if t_annotation.attrib.get("hidden", "") != "true"
])
self.parsed_xml[group_name][sender_name] = traffic_annotations
def _sort_parsed_xml(self):
"""Sort on the group and sender keys in alphabetical order, note that
annotations are already sorted."""
self.parsed_xml = {
k: OrderedDict(sorted(v.items()))
for k, v in self.parsed_xml.items()
}
self.parsed_xml = OrderedDict(
sorted(self.parsed_xml.items(), key=lambda t: t[0]))
def _add_group_placeholder(self, name):
return {"type": Placeholder.GROUP, "name": name}
def _add_sender_placeholder(self, name):
return {"type": Placeholder.SENDER, "name": name}
def _add_annotation_placeholder(self, unique_id):
"""
Args:
unique_id: str
The annotation's unique_id.
"""
traffic_annotation = self.annotations_mapping.get(unique_id, None)
is_complete = traffic_annotation and all(traffic_annotation)
if not is_complete:
print(
"Warning: {} row is empty in annotations.tsv but is in grouping.xml".
format(unique_id))
traffic_annotation = TrafficAnnotation(unique_id, "NA", "NA", "NA", "NA",
"NA")
return {
"type": Placeholder.ANNOTATION, "traffic_annotation": traffic_annotation}
def build_placeholders(self):
"""
Returns:
The placeholders <list> to be added in the order of their appearance.
The annotations are the TrafficAnnotation objects with the relevant
information.
"""
self._sort_parsed_xml()
placeholders = []
for group, senders in self.parsed_xml.items():
placeholders.append(self._add_group_placeholder(group))
for sender, annotations in senders.items():
placeholders.append(self._add_sender_placeholder(sender))
for annotation in annotations:
placeholders.append(self._add_annotation_placeholder(annotation))
return placeholders
def jprint(msg):
print(json.dumps(msg, indent=4), file=sys.stderr)
def extract_body(document=None, target="body", json_file_path="template.json"):
"""Google Doc API returns a .json object. Parse this doc object to obtain its
body.
The |template.json| object of the current state of
the document can be obtained by running the update_annotations_doc.py script
using the --verbose flag.
"""
if document:
doc = document
else:
try:
with open(json_file_path) as json_file:
doc = json.load(json_file)
except IOError:
print("Couldn't find the .json file.")
# Without a document there is nothing to extract; avoid a NameError below.
return None
if target == "all":
return doc
return doc[target]
def find_first_index(doc):
"""Finds the cursor index (location) that comes right after the Introduction
section. Namely, the endIndex of the paragraph block the |target_text| belongs
to.
Returns: int
The first cursor index (loc) of the template document, right after the
Introduction section.
"""
target_text = "The policy, if one exists, to control this type of network"
padding = 1 # We pad so as to overwrite cleanly.
body = extract_body(document=doc)
contents = body["content"]
for element in contents:
if "paragraph" in element:
end_index = element["endIndex"]
lines = element["paragraph"]["elements"]
for text_run in lines:
if target_text in text_run["textRun"]["content"]:
return end_index + padding
def find_last_index(doc):
"""
Returns: int
The last cursor index (loc) of the template document.
"""
body = extract_body(document=doc)
contents = body["content"]
last_index = contents[-1]["endIndex"]
return last_index - 1
def find_chrome_browser_version(doc):
"""Finds what the current chrome browser version is in the document.
We grab the current "Chrome Browser version MAJOR.MINOR.BUILD.PATCH" from the
document's header.
Returns: str
The chrome browser version string.
"""
# Only one header.
header = extract_body(document=doc, target="headers").values()[0]
header_elements = header["content"][0]["paragraph"]["elements"]
text = header_elements[0]["textRun"]["content"]
current_version = re.search(r"([\d.]+)", text).group()
return current_version
def find_bold_ranges(doc, debug=False):
"""Finds parts to bold given the targets of "trigger", "data", etc.
Returns:
The startIndex <int> and endIndex <int> tuple pairs as a list for all
occurrences of the targets. <List[Tuple[int, int]]>
"""
bold_ranges = []
targets = ["Trigger", "Data", "Settings", "Policy"]
content = extract_body(document=doc)["content"]
for element in content:
element_type = list(element.keys())[-1]
if element_type != "table":
continue
# Recall that each table is 1x2 in the doc: the first cell contains the
# unique_id, the second the annotation's remaining attributes.
# The unique_id column requires messy parsing; inspect the JSON output
# with jprint() to confirm/debug if it breaks.
unique_id_col = element["table"]["tableRows"][0]["tableCells"][0][
"content"][0]["paragraph"]["elements"][0]
if debug:
jprint(unique_id_col)
assert "textRun" in unique_id_col, "Not the correct unique_id cell"
start_index = unique_id_col["startIndex"]
end_index = unique_id_col["endIndex"]
bold_ranges.append((start_index, end_index))
start_index, end_index = None, None # Reset
# The info column also requires messy parsing; inspect the JSON output
# with jprint() to confirm/debug if it breaks.
info_elements = element["table"]["tableRows"][0]["tableCells"][1]["content"]
for info_col in info_elements:
start_index = info_col["startIndex"]
content = info_col["paragraph"]["elements"][0]["textRun"]["content"]
# To find the end_index, run through and find something in targets.
for target in targets:
if content.find("{}:".format(target)) != -1:
# Contains the string "|target|:"
end_index = start_index + len(target) + 1
bold_ranges.append((start_index, end_index))
break
if debug:
jprint(info_col)
print("#" * 30)
return bold_ranges

@@ -0,0 +1,120 @@
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for parser.py
"""
import unittest
import parser
import os
# Absolute path to chrome/src.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "../../.."))
TESTS_DIR = os.path.join(SCRIPT_DIR, "test_data")
class ParserTest(unittest.TestCase):
TSV_CONTENTS = [
[
u"unique_id_A", u"", u"sender_A", u"description_A", u"trigger_A",
u"data_A", u"destination_A", u"cookies_allowed_A", u"cookies_store_A",
u"settings_A", u"chrome_policy_A", u"", u"source_file_A",
u"id_hash_code_A", u"content_hash_code_A"],
[
u"unique_id_B", u"", u"sender_B", u"description_B", u"trigger_B",
u"data_B", u"destination_B", u"cookies_allowed_B", u"cookies_store_B",
u"settings_B", u"chrome_policy_B", u"", u"source_file_B",
u"id_hash_code_B", u"content_hash_code_B"],
[
u"unique_id_C", u"", u"sender_C", u"description_C", u"trigger_C",
u"data_C", u"destination_C", u"cookies_allowed_C", u"cookies_store_C",
u"settings_C", u"chrome_policy_C", u"", u"source_file_C",
u"id_hash_code_C", u"content_hash_code_C"]
]
ANNOTATIONS_MAPPING = {
"unique_id_A": parser.TrafficAnnotation(**{
"unique_id": "unique_id_A",
"description": "description_A",
"trigger": "trigger_A",
"data": "data_A",
"settings": "settings_A",
"policy": "chrome_policy_A"}),
"unique_id_B": parser.TrafficAnnotation(**{
"unique_id": "unique_id_B",
"description": "description_B",
"trigger": "trigger_B",
"data": "data_B",
"settings": "settings_B",
"policy": "chrome_policy_B"}),
"unique_id_C": parser.TrafficAnnotation(**{
"unique_id": "unique_id_C",
"description": "description_C",
"trigger": "trigger_C",
"data": "data_C",
"settings": "settings_C",
"policy": "chrome_policy_C"})
}
PLACEHOLDERS = [
{"type": parser.Placeholder.GROUP, "name": "Group A"},
{"type": parser.Placeholder.SENDER, "name": "Sender 1"},
{
"type": parser.Placeholder.ANNOTATION,
"traffic_annotation": ANNOTATIONS_MAPPING["unique_id_A"]},
{"type": parser.Placeholder.SENDER, "name": "Sender 2"},
{
"type": parser.Placeholder.ANNOTATION,
"traffic_annotation": ANNOTATIONS_MAPPING["unique_id_B"]},
{"type": parser.Placeholder.GROUP, "name": "Group C"},
{"type": parser.Placeholder.SENDER, "name": "Sender 3"},
{
"type": parser.Placeholder.ANNOTATION,
"traffic_annotation": ANNOTATIONS_MAPPING["unique_id_C"]}
]
# Document formatted according to fake_grouping.xml
DOC_JSON = parser.extract_body(
target="all", json_file_path=os.path.join(TESTS_DIR, "fake_doc.json"))
def test_load_tsv_file(self):
self.assertEqual(self.TSV_CONTENTS, parser.load_tsv_file(os.path.join(
SRC_DIR,
"tools/traffic_annotation/scripts/test_data/fake_annotations.tsv"),
False))
def test_map_annotations(self):
self.assertEqual(
self.ANNOTATIONS_MAPPING, parser.map_annotations(self.TSV_CONTENTS))
def test_xml_parser_build_placeholders(self):
xml_parser = parser.XMLParser(
os.path.join(TESTS_DIR, "fake_grouping.xml"), self.ANNOTATIONS_MAPPING)
self.assertEqual(self.PLACEHOLDERS, xml_parser.build_placeholders())
def test_find_first_index(self):
first_index = parser.find_first_index(self.DOC_JSON)
self.assertEqual(1822, first_index)
def test_find_last_index(self):
last_index = parser.find_last_index(self.DOC_JSON)
self.assertEqual(2066, last_index)
def test_find_chrome_browser_version(self):
current_version = parser.find_chrome_browser_version(self.DOC_JSON)
self.assertEqual("86.0.4187.0", current_version)
def test_find_bold_ranges(self):
expected_bold_ranges = [
(1843, 1855), (1859, 1867), (1871, 1876), (1880, 1889), (1893, 1900),
(1918, 1930), (1934, 1942), (1968, 1975), (1946, 1951), (1955, 1964),
(2001, 2013), (2017, 2025), (2029, 2034), (2038, 2047), (2051, 2058)]
bold_ranges = parser.find_bold_ranges(self.DOC_JSON)
self.assertItemsEqual(expected_bold_ranges, bold_ranges)
if __name__ == "__main__":
unittest.main()

@@ -0,0 +1,3 @@
unique_id_A sender_A description_A trigger_A data_A destination_A cookies_allowed_A cookies_store_A settings_A chrome_policy_A source_file_A id_hash_code_A content_hash_code_A
unique_id_B sender_B description_B trigger_B data_B destination_B cookies_allowed_B cookies_store_B settings_B chrome_policy_B source_file_B id_hash_code_B content_hash_code_B
unique_id_C sender_C description_C trigger_C data_C destination_C cookies_allowed_C cookies_store_C settings_C chrome_policy_C source_file_C id_hash_code_C content_hash_code_C

@@ -0,0 +1,15 @@
<groups>
<group name="Group A">
<sender name="Sender 1">
<traffic_annotation unique_id="unique_id_A"/>
</sender>
<sender name="Sender 2">
<traffic_annotation unique_id="unique_id_B"/>
</sender>
</group>
<group name="Group C">
<sender name="Sender 3">
<traffic_annotation unique_id="unique_id_C"/>
</sender>
</group>
</groups>

@@ -0,0 +1,552 @@
#!/usr/bin/env vpython
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script updates the Chrome Browser Network Traffic Annotations document.
To run the script, you should first generate annotations.tsv using
traffic_annotation_auditor.
To run the script, call: `update_annotations_doc.py --config-file=[config.json]
--annotations-file=[path_to_annotations.tsv]`
Run `update_annotations_doc --config-help` for help on the config.json
configuration file.
"""
from __future__ import print_function
import argparse
import datetime
import httplib2
import time
import json
import sys
import os
from apiclient import discovery
from infra_libs import luci_auth
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import parser
from parser import (XMLParser, map_annotations, load_tsv_file, Placeholder,
PLACEHOLDER_STYLES)
# Absolute path to chrome/src.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "../../.."))
class NetworkTrafficAnnotationsDoc:
SCOPES = "https://www.googleapis.com/auth/documents"
APPLICATION_NAME = "Chrome Network Traffic Annotations Document Updater"
# Colors are given as RGB percentages
BLUE = {"red": 0.812, "green": 0.886, "blue": 0.953}
WHITE = {"red": 1.0, "green": 1.0, "blue": 1.0}
def __init__(self,
doc_id,
doc_name,
credentials_file_path,
client_token_file_path,
verbose,
index=None):
"""
Args:
doc_id: str
ID of the annotations document for clients. This is the destination
document where updates are made.
doc_name: str
Name of the document that contains the annotations for clients.
credentials_file_path: str
Path relative to src to read user credentials (credentials.json).
client_token_file_path: str
Path relative to src to read/save user credentials (token.pickle).
verbose: bool
Flag requesting dump of API status calls.
index: int
Where to begin adding content to. If index=None, will automatically
find the index corresponding to the end of the template file.
"""
self.destination_id = doc_id
self.doc_name = doc_name
self.index = index
self._docs_service = None
self._color_bool = True
self._credentials_file_path = credentials_file_path
self._client_token_file_path = client_token_file_path
self.verbose = verbose
def update_doc(self, placeholders):
"""Updates the chrome version of the destination document and includes all
the annotations within the grouping.xml file.
Args:
placeholders:
The placeholders, in their order of appearance, used to populate the document.
"""
self._docs_service = self._initialize_service(
self._get_credentials(
self._credentials_file_path, self._client_token_file_path))
doc = self._get_doc_contents(self.destination_id)
self._update_chrome_version(doc)
self.index = self._clear_destination_contents(doc)
self._insert_placeholders(placeholders)
self._to_all_bold()
if self.verbose:
self._get_doc_contents(self.destination_id, save=True)
print("Done, please review the document before sharing it with clients.")
def _initialize_service(self, credentials):
"""Initializes the Google Docs API services.
Args:
credentials: OAuth2Credentials
The user credentials used to authorize API requests.
Returns:
googleapiclient.discovery.Resource Doc API service, v1.
"""
http = credentials.authorize(httplib2.Http())
return discovery.build("docs", "v1", http=http)
def _get_credentials(self, credentials_file_path, client_token_file_path):
""" Gets valid user credentials from storage. If nothing has been stored, or
if the stored credentials are invalid, the OAuth2 flow is completed to
obtain the new credentials.
When running in the buildbot, uses LUCI credentials instead.
Args:
credentials_file_path: str
Absolute path to read credentials.json.
client_token_file_path: str
Absolute path to read/save user secret token.
Returns:
OAuth2Credentials The obtained user credentials.
"""
if luci_auth.available():
return luci_auth.LUCICredentials(scopes=[self.SCOPES])
store = Storage(os.path.join(SRC_DIR, client_token_file_path))
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(
os.path.join(SRC_DIR, credentials_file_path), self.SCOPES)
flow.user_agent = self.APPLICATION_NAME
flags = tools.argparser.parse_args([])
credentials = tools.run_flow(flow, store, flags)
print("Storing credentials to " + credentials_file_path)
return credentials
def _get_doc_contents(self, document_id, save=False):
document = self._docs_service.documents().get(
documentId=document_id).execute()
if save:
with open(os.path.join(SRC_DIR,
"tools/traffic_annotation/scripts/template.json"), "w") as out_file:
json.dump(document, out_file)
print("Saved template.json.")
if self.verbose:
print(document)
return document
def _update_chrome_version(self, doc):
"""Gets the chrome version (MAJOR.MINOR.BUILD.PATCH) from src/chrome/VERSION
and updates the doc to reflect the correct version.
"""
version = ""
with open(os.path.join(SRC_DIR, "chrome/VERSION"), "r") as version_file:
version = ".".join(line.strip().split("=")[1]
for line in version_file.readlines())
current_version = parser.find_chrome_browser_version(doc)
replacement = "Chrome Browser version {}".format(version)
target = "Chrome Browser version {}".format(current_version)
if replacement == target:
print("Document chrome version is already up to date.")
return
req = [{
"replaceAllText": {
"containsText": {
"text": target,
"matchCase": True
},
"replaceText": replacement
}
}]
self._perform_requests(req)
print("Updated document chrome version {} --> {}".format(
current_version, version))
def _clear_destination_contents(self, doc):
"""Will clear the contents of the destination document from the end of the
"Introduction" section onwards.
Returns: int The index at which to start writing.
"""
print("Overwriting the destination document.")
first_index = parser.find_first_index(doc)
last_index = parser.find_last_index(doc)
if self.verbose:
print("First index, last index", first_index, last_index)
if first_index >= last_index:
print("Nothing to overwrite.")
return first_index
req = [{
"deleteContentRange": {
"range": {
"startIndex": first_index,
"endIndex": last_index
}
}
}]
self._perform_requests(req)
return first_index
def _perform_requests(self, reqs):
"""Performs the requests |reqs| using batch update.
"""
if not reqs:
print("Warning, no requests provided. Returning.")
return
status = self._docs_service.documents().batchUpdate(
body={
"requests": reqs
}, documentId=self.destination_id, fields="").execute()
if self.verbose:
print("#"*30)
print(status)
print("#"*30)
return status
def _insert_placeholders(self, placeholders):
"""Placeholders (e.g. groups, senders, traffic annotations) are inserted in
the document in their order of appearance.
Increment the self.index value to ensure that placeholders are inserted at
the correct locations. Because placeholders are sorted in order of
appearance, self.index is strictly increasing.
"""
reqs = []
for placeholder in placeholders:
placeholder_type = placeholder["type"]
if placeholder_type == Placeholder.ANNOTATION:
req, index = self._create_annotation_request(
placeholder["traffic_annotation"],
self.index,
color=self._color_bool)
self._color_bool = not self._color_bool
else:
# The placeholder is either a group or a sender.
req, index = self._create_group_or_sender_request(
placeholder["name"], self.index, placeholder_type)
reqs += req
self.index += index
status = self._perform_requests(reqs)
print("Added all {} placeholders!\n".format(len(placeholders)))
def _create_text_request(self, text, index):
"""
Returns:
The request to insert raw text without formatting and the length of the
text for appropriately incrementing |self.index|.
"""
return {
"insertText": {
"location": {"index": index},
"text": text
}
}, len(text)
def _format_text(self, start_index, end_index, placeholder_type):
"""Format the text in between |start_index| and |end_index| using the styles
specified by |parser.PLACEHOLDER_STYLES|.
Returns: The request to format the text in between |start_index| and
|end_index|.
"""
return {
"updateTextStyle": {
"range": {
"startIndex": start_index,
"endIndex": end_index
},
"textStyle": {
"bold": PLACEHOLDER_STYLES[placeholder_type]["bold"],
"fontSize": {
"magnitude":
PLACEHOLDER_STYLES[placeholder_type]["fontSize"],
"unit": "PT"
},
"weightedFontFamily": {
"fontFamily": PLACEHOLDER_STYLES[placeholder_type]["font"],
"weight": 400
}
},
"fields": "*"
}
}
def _create_group_or_sender_request(self, text, index, placeholder_type):
"""Returns the request for inserting the group or sender placeholders using
the styling of |parser.PLACEHOLDER_STYLES|.
"""
assert placeholder_type in [Placeholder.GROUP, Placeholder.SENDER]
text += "\n"
req, idx = self._create_text_request(text, index)
reqs = [req]
reqs.append({
"updateParagraphStyle": {
"range": {
"startIndex": index,
"endIndex": index + idx
},
"paragraphStyle": {
"namedStyleType":
PLACEHOLDER_STYLES[placeholder_type]["namedStyleType"],
"direction": "LEFT_TO_RIGHT",
"spacingMode": "NEVER_COLLAPSE",
"spaceAbove": {"unit": "PT"}
},
"fields": "*"
}
})
reqs.append(self._format_text(index, index + idx, placeholder_type))
return reqs, idx
def _create_annotation_request(self, traffic_annotation, index, color=False):
"""Returns the request (dict) for inserting the annotations table. Refer to
the template document for a visual.
Args:
traffic_annotation: parser.TrafficAnnotation
The TrafficAnnotation object with all the relevant information, e.g.
unique_id, description, etc.
index: int
Where the annotation should be added in the document.
color: bool
If True, make the table blue, otherwise white.
"""
# These offsets are hardcoded; they follow from how tables are represented
# in the Google Docs API.
idx = 8
offset = 2
# Create the 1x2 table -- col 1 contains the unique_id placeholder, col 2
# contains the remaining placeholders, e.g. trigger, description, etc.
padding_req, _ = self._create_text_request("\n", index)
reqs = [padding_req]
reqs.append({
"insertTable": {
"rows": 1,
"columns": 2,
"location": {"index": index}
}
})
# Writing the annotation's relevant information directly to the table,
# within the left cell |left_text| and the right cell |right_text|.
left_text = traffic_annotation.unique_id
right_text = "{}\nTrigger: {}\nData: {}\nSettings: {}\nPolicy: {}".format(
traffic_annotation.description, traffic_annotation.trigger,
traffic_annotation.data, traffic_annotation.settings,
traffic_annotation.policy)
# The +4 offset is likewise dictated by the Google Docs API's table layout.
start_index = index + 4
left_req, left_increment = self._create_text_request(left_text, start_index)
right_req, right_increment = self._create_text_request(
right_text, start_index + left_increment + offset)
reqs.append(left_req)
reqs.append(right_req)
end_index = index + left_increment + right_increment + idx
# This sizes the table columns, making the right cell wider than the
# left cell.
col_properties = [{
"columnIndices": [0],
"width": 153
}, {
"columnIndices": [1],
"width": 534
}]
for properties in col_properties:
reqs.append({
"updateTableColumnProperties": {
"tableStartLocation": {"index": index + 1},
"columnIndices": properties["columnIndices"],
"fields": "*",
"tableColumnProperties": {
"widthType": "FIXED_WIDTH",
"width": {
"magnitude": properties["width"],
"unit": "PT"
}
}
}
})
# Changing the table's color and ensuring that the borders are "turned off"
# (really they're given the same color as the background).
color = self.BLUE if color else self.WHITE
color_and_border_req = {
"updateTableCellStyle": {
"tableStartLocation": {"index": index + 1},
"fields": "*",
"tableCellStyle": {
"rowSpan": 1,
"columnSpan": 1,
"backgroundColor": {
"color": {
"rgbColor": color
}
}
}
}
}
# Make the table borders 'invisible' and adjust the padding so the text
# sits in the cell correctly.
for direction in ["Left", "Right", "Top", "Bottom"]:
color_and_border_req["updateTableCellStyle"]["tableCellStyle"][
"border" + direction] = {
"color": {"color": {"rgbColor": color}},
"width": {"unit": "PT"},
"dashStyle": "SOLID"
}
color_and_border_req["updateTableCellStyle"]["tableCellStyle"][
"padding" + direction] = {"magnitude": 1.44, "unit": "PT"}
reqs.append(color_and_border_req)
# Text formatting (normal text, linespacing, etc.) adds space below the
# lines within a cell.
reqs.append({
"updateParagraphStyle": {
"range": {
"startIndex": start_index,
"endIndex": end_index - 1
},
"paragraphStyle": {
"namedStyleType": "NORMAL_TEXT",
"lineSpacing": 100,
"direction": "LEFT_TO_RIGHT",
"spacingMode": "NEVER_COLLAPSE",
"spaceBelow": {"magnitude": 4, "unit": "PT"},
"avoidWidowAndOrphan": False
},
"fields": "*"
}
})
reqs.append(
self._format_text(start_index, end_index - 1, Placeholder.ANNOTATION))
return reqs, end_index - index
def _to_bold(self, start_index, end_index):
"""Bold the text between start_index and end_index. Uses the same formatting
as the annotation bold."""
return self._format_text(start_index, end_index,
Placeholder.ANNOTATION_BOLD)
def _to_all_bold(self):
"""Bold the unique_id, description, trigger, etc. in the tables to
correspond exactly to the template."""
# Get recent doc after all the substitutions with the annotations. At this
# point, document has all the content.
print("Finding everything to bold...")
doc = self._get_doc_contents(self.destination_id)
# The ranges to bold using updateTextStyle requests.
bold_ranges = parser.find_bold_ranges(doc)
reqs = []
for start_index, end_index in bold_ranges:
if end_index > start_index:
reqs.append(self._to_bold(start_index, end_index))
self._perform_requests(reqs)
def print_config_help():
print("The config.json file should have the following items:\n"
"doc_id:\n"
" ID of the destination document.\n"
"doc_name:\n"
" Name of the document.\n"
"credentials_file_path:\n"
" Absolute path of the file that keeps user credentials.\n"
"client_token_file_path:\n"
" Absolute path of the token.pickle which keeps the users credentials."
" The file can be created as specified in:\n"
" https://developers.google.com/docs/api/quickstart/python")
def main():
args_parser = argparse.ArgumentParser(
description="Updates 'Chrome Browser Network Traffic Annotations' doc.")
args_parser.add_argument("--config-file", help="Configurations file.")
args_parser.add_argument("--annotations-file",
help="TSV annotations file exported from auditor.")
args_parser.add_argument("--verbose",
action="store_true",
help="Reports all updates. "
" Also creates a scripts/template.json file "
" outlining the document's current structure.")
args_parser.add_argument("--config-help",
action="store_true",
help="Shows the configurations help.")
args = args_parser.parse_args()
if args.config_help:
print_config_help()
return 0
# Load and parse config file.
with open(os.path.join(SRC_DIR, args.config_file)) as config_file:
config = json.load(config_file)
tsv_contents = load_tsv_file(
os.path.join(SRC_DIR, args.annotations_file), False)
if not tsv_contents:
print("Could not read annotations file.")
return -1
xml_parser = XMLParser(
os.path.join(SRC_DIR, "tools/traffic_annotation/summary/grouping.xml"),
map_annotations(tsv_contents))
placeholders = xml_parser.build_placeholders()
print("#" * 40)
print("There are:", len(placeholders), "placeholders")
if args.verbose:
print(placeholders)
print("#" * 40)
network_traffic_doc = NetworkTrafficAnnotationsDoc(
doc_id=config["doc_id"],
doc_name=config["doc_name"],
credentials_file_path=config["credentials_file_path"],
client_token_file_path=config["client_token_file_path"],
verbose=args.verbose)
if not network_traffic_doc.update_doc(placeholders):
return -1
return 0
if __name__ == "__main__":
sys.exit(main())

@@ -0,0 +1,61 @@
python_version: "2.7"
wheel: <
name: "infra/python/wheels/google_api_python_client-py2_py3"
version: "version:1.6.2"
>
wheel: <
name: "infra/python/wheels/oauth2client-py2_py3"
version: "version:4.0.0"
>
wheel: <
name: "infra/python/wheels/uritemplate-py2_py3"
version: "version:3.0.0"
>
wheel: <
name: "infra/python/wheels/enum34-py2"
version: "version:1.1.6"
>
wheel: <
name: "infra/python/wheels/httplib2-py2_py3"
version: "version:0.12.1"
>
wheel: <
name: "infra/python/wheels/rsa-py2_py3"
version: "version:3.4.2"
>
wheel: <
name: "infra/python/wheels/pyasn1-py2_py3"
version: "version:0.2.3"
>
wheel: <
name: "infra/python/wheels/pyasn1_modules-py2_py3"
version: "version:0.0.8"
>
wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.10.0"
>
wheel: <
name: "infra/python/wheels/infra_libs-py2"
version: "version:2.0.0"
>
wheel: <
name: "infra/python/wheels/protobuf-py2_py3"
version: "version:3.2.0"
>
wheel: <
name: "infra/python/wheels/requests-py2_py3"
version: "version:2.13.0"
>

@@ -0,0 +1,175 @@
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for update_annotations_doc.py
"""
import os
import sys
import unittest
from mock import MagicMock
# Mock some imports which aren't necessary during testing.
sys.modules["infra_libs"] = MagicMock()
import update_annotations_doc
import parser
# Absolute path to chrome/src.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "../../.."))
TESTS_DIR = os.path.join(SCRIPT_DIR, "test_data")
class UpdateAnnotationsDocTest(unittest.TestCase):
network_doc_obj = update_annotations_doc.NetworkTrafficAnnotationsDoc(
"", "", "", "", "")
def test_create_group_request(self):
text = "TestGroup"
req, index = self.network_doc_obj._create_group_or_sender_request(
text, 0, parser.Placeholder.GROUP)
self.assertEqual(len(text)+1, index)
expected_req = [
{"insertText": {"text": "TestGroup\n", "location": {"index": 0}}},
{"updateParagraphStyle": {
"fields": "*",
"range": {"endIndex": 10, "startIndex": 0},
"paragraphStyle": {
"spacingMode": "NEVER_COLLAPSE",
"direction": "LEFT_TO_RIGHT",
"namedStyleType": "HEADING_1",
"spaceAbove": {"unit": "PT"}
}
}
},
{"updateTextStyle": {
"textStyle": {
"fontSize": {"magnitude": 20, "unit": "PT"},
"bold": False,
"weightedFontFamily": {
"fontFamily": "Roboto",
"weight": 400
}
},
"range": {"endIndex": 10, "startIndex": 0}, "fields": "*"}}
]
self.assertEqual(expected_req, req)
def test_create_sender_request(self):
text = "TestSender"
req, index = self.network_doc_obj._create_group_or_sender_request(
text, 0, parser.Placeholder.SENDER)
self.assertEqual(len(text)+1, index)
expected_req = [
{"insertText": {"text": "TestSender\n", "location": {"index": 0}}},
{"updateParagraphStyle": {
"fields": "*",
"range": {"endIndex": 11, "startIndex": 0},
"paragraphStyle": {
"spacingMode": "NEVER_COLLAPSE",
"direction": "LEFT_TO_RIGHT",
"namedStyleType": "HEADING_2",
"spaceAbove": {"unit": "PT"}
}
}
},
{"updateTextStyle": {
"textStyle": {"fontSize": {"magnitude": 14, "unit": "PT"},
"bold": True,
"weightedFontFamily": {"fontFamily": "Roboto", "weight": 400}},
"range": {"endIndex": 11, "startIndex": 0}, "fields": "*"}
}
]
self.assertEqual(expected_req, req)
def test_create_annotation_request(self):
traffic_annotation = parser.TrafficAnnotation(**{
"unique_id": "unique_id_A",
"description": "description_A",
"trigger": "trigger_A",
"data": "data_A",
"settings": "settings_A",
"policy": "chrome_policy_A"})
req, index = self.network_doc_obj._create_annotation_request(
traffic_annotation, 0)
self.assertEqual(109, index)
expected_req = [
{'insertText': {'text': '\n', 'location': {'index': 0}}},
{'insertTable': {'rows': 1, 'location': {'index': 0}, 'columns': 2}},
{'insertText': {'text': 'unique_id_A', 'location': {'index': 4}}},
{
'insertText': {
'text': "description_A\nTrigger: trigger_A\nData: data_A\nSettings: "
"settings_A\nPolicy: chrome_policy_A", 'location': {'index': 17}}},
{'updateTableColumnProperties': {
'columnIndices': [0],
'fields': '*',
'tableColumnProperties': {
'width': {'magnitude': 153, 'unit': 'PT'},
'widthType': 'FIXED_WIDTH'},
'tableStartLocation': {'index': 1}}},
{'updateTableColumnProperties': {
'columnIndices': [1],
'fields': '*',
'tableColumnProperties': {
'width': {'magnitude': 534, 'unit': 'PT'},'widthType': 'FIXED_WIDTH'},
'tableStartLocation': {'index': 1}}},
{'updateTableCellStyle': {
'fields': '*',
'tableCellStyle': {
'rowSpan': 1,
'borderBottom': {
'color': {
'color': {'rgbColor': {'blue': 1.0, 'green': 1.0, 'red': 1.0}}},
'width': {'unit': 'PT'}, 'dashStyle': 'SOLID'},
'paddingBottom': {'magnitude': 1.44, 'unit': 'PT'},
'paddingLeft': {'magnitude': 1.44, 'unit': 'PT'},
'paddingTop': {'magnitude': 1.44, 'unit': 'PT'},
'borderLeft': {
'color': {
'color': {'rgbColor': {'blue': 1.0, 'green': 1.0, 'red': 1.0}}},
'width': {'unit': 'PT'},
'dashStyle': 'SOLID'},
'columnSpan': 1,
'backgroundColor': {
'color': {'rgbColor': {'blue': 1.0, 'green': 1.0, 'red': 1.0}}},
'borderRight': {
'color': {
'color': {'rgbColor': {'blue': 1.0, 'green': 1.0, 'red': 1.0}}},
'width': {'unit': 'PT'},
'dashStyle': 'SOLID'},
'borderTop': {
'color': {
'color': {'rgbColor': {'blue': 1.0, 'green': 1.0, 'red': 1.0}}},
'width': {'unit': 'PT'},
'dashStyle': 'SOLID'},
'paddingRight': {'magnitude': 1.44, 'unit': 'PT'}},
'tableStartLocation': {'index': 1}}},
{'updateParagraphStyle': {
'fields': '*',
'range': {'endIndex': 108, 'startIndex': 4},
'paragraphStyle': {
'spacingMode': 'NEVER_COLLAPSE',
'direction': 'LEFT_TO_RIGHT',
'spaceBelow': {'magnitude': 4, 'unit': 'PT'},
'lineSpacing': 100,
'avoidWidowAndOrphan': False,
'namedStyleType': 'NORMAL_TEXT'}}},
{'updateTextStyle': {
'textStyle': {'fontSize': {'magnitude': 9, 'unit': 'PT'},
'bold': False,
'weightedFontFamily': {'fontFamily': 'Roboto', 'weight': 400}},
'range': {'endIndex': 108, 'startIndex': 4}, 'fields': '*'}}]
self.assertEqual(expected_req, req)
if __name__ == "__main__":
unittest.main()

@@ -29,6 +29,7 @@ from infra_libs import luci_auth
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from parser import load_tsv_file
class SheetEditor():
@@ -336,37 +337,6 @@ class SheetEditor():
self.insert_count, self.update_count, self.delete_count)
def utf_8_encoder(input_file):
for line in input_file:
yield line.encode("utf-8")
def LoadTSVFile(file_path, verbose):
""" Loads annotations TSV file.
Args:
file_path: str Path to the TSV file.
verbose: bool Whether to print messages about ignored rows.
Returns:
list of list Table of loaded annotations.
"""
rows = []
with io.open(file_path, mode="r", encoding="utf-8") as csvfile:
# CSV library does not support unicode, so encoding to utf-8 and back.
reader = csv.reader(utf_8_encoder(csvfile), delimiter='\t')
for row in reader:
row = [unicode(col, 'utf-8') for col in row]
# If the last column of the file_row is empty, the row belongs to a
# platform different from the one that TSV file is generated on, hence it
# should be ignored.
if row[-1]:
rows.append(row)
elif verbose:
print("Ignored from other platforms: %s" % row[0])
return rows
def PrintConfigHelp():
print("The config.json file should have the following items:\n"
"spreadsheet_id:\n"
@@ -416,7 +386,7 @@ def main():
config = json.load(config_file)
# Load and parse annotations file.
file_content = LoadTSVFile(args.annotations_file, args.verbose)
file_content = load_tsv_file(args.annotations_file, args.verbose)
if not file_content:
print("Could not read annotations file.")
return -1