+ resources.extend(data)
+
+ # fetch the next page from headers, do not count pages
+ # http://engineering.hackerearth.com/2014/08/21/python-requests-module/
+ if "next" in resp.links:
+ url = resp.links['next']['url']
+ log(2, "Found next link for Github pagination: " + url)
+ else:
+ break # no link found, we are done
+ log(2, "No more pages to fetch, stop.")
+
+ return resources
+
def issue_type(issue):
    """Return the changelog category for *issue* based on its GitHub labels.

    Categories are scanned from least to most important (e.g. "Support",
    "Documentation", "Bug", "Enhancement"); the first category with a
    matching label wins. Falls back to "Support" when nothing matches.
    """
    attached = {label["name"] for label in issue["labels"]}

    # Walk the category map in reverse so the least important one is tried first.
    for category in reversed(categories):
        if any(name in attached for name in categories[category]):
            return category

    return "Support"
+
def escape_markdown(text):
    """Escape Markdown control characters in *text* for safe changelog output.

    Backslashes are doubled first so that the escapes added for the other
    special characters are not themselves re-escaped afterwards.
    """
    # Bug fix: str.replace() returns a new string; the original discarded the
    # result, so backslashes were never actually escaped.
    tmp = text.replace('\\', '\\\\')

    # Raw string so the character-class escapes reach the regex engine intact.
    return re.sub(r"([<>*_()\[\]#])", r"\\\1", tmp)
+
def format_labels(issue):
    """Return a parenthesized, comma-separated label suffix for *issue*, or "".

    Labels listed in the module-level ``ignored_labels`` are dropped; pull
    requests additionally get a synthetic "PR" label.
    """
    # Bug fix: on Python 3, filter() returns a lazy iterator which supports
    # neither .append() nor len(); build a real list instead.
    labels = [label["name"] for label in issue["labels"]
              if label["name"] not in ignored_labels]

    # Mark PRs as custom label
    if "pull_request" in issue:
        labels.append("PR")

    if labels:
        return " (" + ", ".join(labels) + ")"
    else:
        return ""
+
def format_title(title):
    """Normalize an issue title for the changelog.

    Strips non-ASCII characters, removes the legacy "[dev.icinga.com #NNNN] "
    prefix, and escapes Markdown control characters.
    """
    # Fix encoding: drop non-ASCII characters and return to str.
    # Bug fixes vs. the original: `except Error:` referenced an undefined
    # name (raising NameError inside the handler), `issue_title` was left
    # unbound on the error path, and bytes.encode() does not exist on
    # Python 3 (the original double-encode was a Python 2 idiom).
    try:
        issue_title = title.encode('ascii', 'ignore').decode('ascii')
    except Exception:
        log(1, "Error: Cannot convert " + title + " to UTF-8")
        issue_title = title  # fall back so issue_title is always bound

    # Remove dev.icinga.com tag
    issue_title = re.sub(r'\[dev\.icinga\.com #\d+\] ', '', issue_title)

    return escape_markdown(issue_title)
+
+#################################
+## MAIN
+
# Per-milestone data; populated while iterating the fetched issues below.
milestones = {}
# Nested mapping: milestone -> category -> list of issues (auto-vivified).
issues = defaultdict(lambda: defaultdict(list))

log(1, "Fetching data from GitHub API for project " + project_name)

try:
    # Fetch every issue (open and closed) via the paginated GitHub API helper.
    tickets = fetch_github_resources("/issues", { "state": "all" })
except requests.exceptions.HTTPError as e:
    log(1, "ERROR " + str(e.response.status_code) + ": " + e.response.text)

    sys.exit(1)

# Changelog output handle; presumably closed further down the script — TODO confirm.
clfp = open(changelog_file, "w+")

# Round-trip the fetched tickets through a pickle file; looks like a local
# cache / debugging aid so reruns can reuse the dump — verify intent.
with open('tickets.pickle', 'wb') as fp:
    pickle.dump(tickets, fp)

with open('tickets.pickle', 'rb') as fp:
    cached_issues = pickle.load(fp)
+for issue in cached_issues: #fetch_github_resources("/issues", { "state": "all" }):
+ milestone = issue["milestone"]
+
+ if not milestone:
+ continue
+
+ ms_title = milestone["title"]
+
+ if not re.match('^\d+\.\d+\.\d+$', ms_title):
+ continue