Dataset schema:
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 classes)
  text              string (75-19.8k chars)
  language          string (1 class)
  meta_information  dict
q268900
MattermostClient.user
test
def user(self, user):
    """Fetch user data."""
    entrypoint = self.RUSERS + '/' + user
    response = self._fetch(entrypoint, None)
    return response
python
{ "resource": "" }
q268901
RSS.fetch
test
def fetch(self, category=CATEGORY_ENTRY):
    """Fetch the entries from the url.

    The method retrieves all entries from an RSS URL.

    :param category: the category of items to fetch

    :returns: a generator of entries
    """
    kwargs = {}
    items = super().fetch(category, **kwargs)

    return items
python
{ "resource": "" }
q268902
RSS.fetch_items
test
def fetch_items(self, category, **kwargs):
    """Fetch the entries

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    logger.info("Looking for rss entries at feed '%s'", self.url)

    nentries = 0  # number of entries

    raw_entries = self.client.get_entries()
    entries = self.parse_feed(raw_entries)['entries']

    for item in entries:
        yield item
        nentries += 1

    logger.info("Total number of entries: %i", nentries)
python
{ "resource": "" }
q268903
RSSCommand.setup_cmd_parser
test
def setup_cmd_parser(cls): """Returns the RSS argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, archive=True) # Required arguments parser.parser.add_argument('url', help="URL of the RSS feed") return parser
python
{ "resource": "" }
q268904
BugzillaREST.fetch
test
def fetch(self, category=CATEGORY_BUG, from_date=DEFAULT_DATETIME):
    """Fetch the bugs from the repository.

    The method retrieves, from a Bugzilla repository, the bugs
    updated since the given date.

    :param category: the category of items to fetch
    :param from_date: obtain bugs updated since this date

    :returns: a generator of bugs
    """
    if not from_date:
        from_date = DEFAULT_DATETIME

    kwargs = {'from_date': from_date}
    items = super().fetch(category, **kwargs)

    return items
python
{ "resource": "" }
q268905
BugzillaRESTClient.bugs
test
def bugs(self, from_date=DEFAULT_DATETIME, offset=None, max_bugs=MAX_BUGS):
    """Get the information of a list of bugs.

    :param from_date: retrieve bugs that were updated from that date;
        dates are converted to UTC
    :param offset: starting position for the search; i.e., to return
        the 11th element, set this value to 10
    :param max_bugs: maximum number of bugs to return per query
    """
    date = datetime_to_utc(from_date)
    date = date.strftime("%Y-%m-%dT%H:%M:%SZ")

    params = {
        self.PLAST_CHANGE_TIME: date,
        self.PLIMIT: max_bugs,
        self.PORDER: self.VCHANGE_DATE_ORDER,
        self.PINCLUDE_FIELDS: self.VINCLUDE_ALL
    }

    if offset:
        params[self.POFFSET] = offset

    response = self.call(self.RBUG, params)

    return response
python
{ "resource": "" }
q268906
BugzillaRESTClient.comments
test
def comments(self, *bug_ids):
    """Get the comments of the given bugs.

    :param bug_ids: list of bug identifiers
    """
    # Hack. The first value must be a valid bug id
    resource = urijoin(self.RBUG, bug_ids[0], self.RCOMMENT)

    params = {
        self.PIDS: bug_ids
    }

    response = self.call(resource, params)

    return response
python
{ "resource": "" }
q268907
BugzillaRESTClient.history
test
def history(self, *bug_ids):
    """Get the history of the given bugs.

    :param bug_ids: list of bug identifiers
    """
    resource = urijoin(self.RBUG, bug_ids[0], self.RHISTORY)

    params = {
        self.PIDS: bug_ids
    }

    response = self.call(resource, params)

    return response
python
{ "resource": "" }
q268908
BugzillaRESTClient.attachments
test
def attachments(self, *bug_ids):
    """Get the attachments of the given bugs.

    :param bug_ids: list of bug identifiers
    """
    resource = urijoin(self.RBUG, bug_ids[0], self.RATTACHMENT)

    params = {
        self.PIDS: bug_ids,
        self.PEXCLUDE_FIELDS: self.VEXCLUDE_ATTCH_DATA
    }

    response = self.call(resource, params)

    return response
python
{ "resource": "" }
q268909
GitLab.__get_issue_notes
test
def __get_issue_notes(self, issue_id):
    """Get issue notes"""
    notes = []

    group_notes = self.client.notes(GitLabClient.ISSUES, issue_id)

    for raw_notes in group_notes:
        for note in json.loads(raw_notes):
            note_id = note['id']
            note['award_emoji_data'] = \
                self.__get_note_award_emoji(GitLabClient.ISSUES, issue_id, note_id)
            notes.append(note)

    return notes
python
{ "resource": "" }
q268910
GitLab.__fetch_merge_requests
test
def __fetch_merge_requests(self, from_date):
    """Fetch the merge requests"""
    merges_groups = self.client.merges(from_date=from_date)

    for raw_merges in merges_groups:
        merges = json.loads(raw_merges)
        for merge in merges:
            merge_id = merge['iid']

            if self.blacklist_ids and merge_id in self.blacklist_ids:
                logger.warning("Skipping blacklisted merge request %s", merge_id)
                continue

            # The single merge_request API call returns a more
            # complete merge request, thus we inflate it with
            # other data (e.g., notes, emojis, versions)
            merge_full_raw = self.client.merge(merge_id)
            merge_full = json.loads(merge_full_raw)

            self.__init_merge_extra_fields(merge_full)

            merge_full['notes_data'] = self.__get_merge_notes(merge_id)
            merge_full['award_emoji_data'] = self.__get_award_emoji(GitLabClient.MERGES, merge_id)
            merge_full['versions_data'] = self.__get_merge_versions(merge_id)

            yield merge_full
python
{ "resource": "" }
q268911
GitLab.__get_merge_notes
test
def __get_merge_notes(self, merge_id):
    """Get merge notes"""
    notes = []

    group_notes = self.client.notes(GitLabClient.MERGES, merge_id)

    for raw_notes in group_notes:
        for note in json.loads(raw_notes):
            note_id = note['id']
            note['award_emoji_data'] = \
                self.__get_note_award_emoji(GitLabClient.MERGES, merge_id, note_id)
            notes.append(note)

    return notes
python
{ "resource": "" }
q268912
GitLab.__get_merge_versions
test
def __get_merge_versions(self, merge_id):
    """Get merge versions"""
    versions = []

    group_versions = self.client.merge_versions(merge_id)

    for raw_versions in group_versions:
        for version in json.loads(raw_versions):
            version_id = version['id']
            version_full_raw = self.client.merge_version(merge_id, version_id)
            version_full = json.loads(version_full_raw)

            version_full.pop('diffs', None)
            versions.append(version_full)

    return versions
python
{ "resource": "" }
q268913
GitLabClient.merges
test
def merges(self, from_date=None):
    """Get the merge requests from pagination"""
    payload = {
        'state': 'all',
        'order_by': 'updated_at',
        'sort': 'asc',
        'view': 'simple',
        'per_page': PER_PAGE
    }

    if from_date:
        payload['updated_after'] = from_date.isoformat()

    return self.fetch_items(GitLabClient.MERGES, payload)
python
{ "resource": "" }
q268914
GitLabClient.merge
test
def merge(self, merge_id):
    """Get the merge full data"""
    path = urijoin(self.base_url, GitLabClient.PROJECTS,
                   self.owner + '%2F' + self.repository,
                   GitLabClient.MERGES, merge_id)

    response = self.fetch(path)

    return response.text
python
{ "resource": "" }
q268915
GitLabClient.merge_versions
test
def merge_versions(self, merge_id):
    """Get the merge versions from pagination"""
    payload = {
        'order_by': 'updated_at',
        'sort': 'asc',
        'per_page': PER_PAGE
    }

    path = urijoin(GitLabClient.MERGES, str(merge_id), GitLabClient.VERSIONS)

    return self.fetch_items(path, payload)
python
{ "resource": "" }
q268916
GitLabClient.merge_version
test
def merge_version(self, merge_id, version_id):
    """Get merge version detail"""
    path = urijoin(self.base_url, GitLabClient.PROJECTS,
                   self.owner + '%2F' + self.repository,
                   GitLabClient.MERGES, merge_id,
                   GitLabClient.VERSIONS, version_id)

    response = self.fetch(path)

    return response.text
python
{ "resource": "" }
q268917
GitLabClient.notes
test
def notes(self, item_type, item_id):
    """Get the notes from pagination"""
    payload = {
        'order_by': 'updated_at',
        'sort': 'asc',
        'per_page': PER_PAGE
    }

    path = urijoin(item_type, str(item_id), GitLabClient.NOTES)

    return self.fetch_items(path, payload)
python
{ "resource": "" }
q268918
GitLabClient.emojis
test
def emojis(self, item_type, item_id):
    """Get emojis from pagination"""
    payload = {
        'order_by': 'updated_at',
        'sort': 'asc',
        'per_page': PER_PAGE
    }

    path = urijoin(item_type, str(item_id), GitLabClient.EMOJI)

    return self.fetch_items(path, payload)
python
{ "resource": "" }
q268919
GitLabClient.note_emojis
test
def note_emojis(self, item_type, item_id, note_id):
    """Get emojis of a note"""
    payload = {
        'order_by': 'updated_at',
        'sort': 'asc',
        'per_page': PER_PAGE
    }

    path = urijoin(item_type, str(item_id), GitLabClient.NOTES,
                   str(note_id), GitLabClient.EMOJI)

    return self.fetch_items(path, payload)
python
{ "resource": "" }
q268920
GitLabClient.calculate_time_to_reset
test
def calculate_time_to_reset(self):
    """Calculate the seconds to reset the token requests, by obtaining the
    difference between the current date and the next date when the token
    is fully regenerated.
    """
    time_to_reset = self.rate_limit_reset_ts - (datetime_utcnow().replace(microsecond=0).timestamp() + 1)

    if time_to_reset < 0:
        time_to_reset = 0

    return time_to_reset
python
{ "resource": "" }
q268921
GitLabClient.fetch_items
test
def fetch_items(self, path, payload):
    """Return the items from GitLab API using links pagination"""
    page = 0  # current page
    last_page = None  # last page
    url_next = urijoin(self.base_url, GitLabClient.PROJECTS,
                       self.owner + '%2F' + self.repository, path)

    logger.debug("Get GitLab paginated items from %s", url_next)

    response = self.fetch(url_next, payload=payload)

    items = response.text
    page += 1

    if 'last' in response.links:
        last_url = response.links['last']['url']
        last_page = last_url.split('&page=')[1].split('&')[0]
        last_page = int(last_page)
        logger.debug("Page: %i/%i", page, last_page)

    while items:
        yield items

        items = None

        if 'next' in response.links:
            url_next = response.links['next']['url']  # Loving requests :)
            response = self.fetch(url_next, payload=payload)
            page += 1

            items = response.text
            logger.debug("Page: %i/%i", page, last_page)
python
{ "resource": "" }
q268922
GitLabClient._init_rate_limit
test
def _init_rate_limit(self):
    """Initialize rate limit information"""
    url = urijoin(self.base_url, 'projects', self.owner + '%2F' + self.repository)
    try:
        response = super().fetch(url)
        self.update_rate_limit(response)
    except requests.exceptions.HTTPError as error:
        if error.response.status_code == 401:
            raise error
        else:
            logger.warning("Rate limit not initialized: %s", error)
python
{ "resource": "" }
q268923
GitLabCommand.setup_cmd_parser
test
def setup_cmd_parser(cls): """Returns the GitLab argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True, archive=True) # GitLab options group = parser.parser.add_argument_group('GitLab arguments') group.add_argument('--enterprise-url', dest='base_url', help="Base URL for GitLab Enterprise instance") group.add_argument('--sleep-for-rate', dest='sleep_for_rate', action='store_true', help="sleep for getting more rate") group.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep', default=MIN_RATE_LIMIT, type=int, help="sleep until reset when the rate limit \ reaches this value") group.add_argument('--blacklist-ids', dest='blacklist_ids', nargs='*', type=int, help="Ids of items that must not be retrieved.") # Generic client options group.add_argument('--max-retries', dest='max_retries', default=MAX_RETRIES, type=int, help="number of API call retries") group.add_argument('--sleep-time', dest='sleep_time', default=DEFAULT_SLEEP_TIME, type=int, help="sleeping time between API call retries") # Positional arguments parser.parser.add_argument('owner', help="GitLab owner") parser.parser.add_argument('repository', help="GitLab repository") return parser
python
{ "resource": "" }
q268924
Slack.fetch
test
def fetch(self, category=CATEGORY_MESSAGE, from_date=DEFAULT_DATETIME):
    """Fetch the messages from the channel.

    This method fetches the messages stored on the channel that were
    sent since the given date.

    :param category: the category of items to fetch
    :param from_date: obtain messages sent since this date

    :returns: a generator of messages
    """
    if not from_date:
        from_date = DEFAULT_DATETIME

    from_date = datetime_to_utc(from_date)
    latest = datetime_utcnow().timestamp()

    kwargs = {'from_date': from_date, 'latest': latest}
    items = super().fetch(category, **kwargs)

    return items
python
{ "resource": "" }
q268925
Slack.metadata_id
test
def metadata_id(item):
    """Extracts the identifier from a Slack item.

    This identifier will be the mix of two fields because Slack
    messages do not have any unique identifier. In this case,
    'ts' and 'user' values (or 'bot_id' when the message is sent
    by a bot) are combined because there have been cases where two
    messages were sent by different users at the same time.
    """
    if 'user' in item:
        nick = item['user']
    elif 'comment' in item:
        nick = item['comment']['user']
    else:
        nick = item['bot_id']

    return item['ts'] + nick
python
{ "resource": "" }
q268926
SlackClient.conversation_members
test
def conversation_members(self, conversation):
    """Fetch the number of members in a conversation, which is a
    supertype for public and private ones, DM and group DM.

    :param conversation: the ID of the conversation
    """
    members = 0
    resource = self.RCONVERSATION_INFO

    params = {
        self.PCHANNEL: conversation,
    }

    raw_response = self._fetch(resource, params)
    response = json.loads(raw_response)
    members += len(response["members"])

    while 'next_cursor' in response['response_metadata'] and response['response_metadata']['next_cursor']:
        params['cursor'] = response['response_metadata']['next_cursor']
        raw_response = self._fetch(resource, params)
        response = json.loads(raw_response)
        members += len(response["members"])

    return members
python
{ "resource": "" }
q268927
SlackClient.channel_info
test
def channel_info(self, channel):
    """Fetch information about a channel."""
    resource = self.RCHANNEL_INFO

    params = {
        self.PCHANNEL: channel,
    }

    response = self._fetch(resource, params)

    return response
python
{ "resource": "" }
q268928
SlackClient.user
test
def user(self, user_id):
    """Fetch user info."""
    resource = self.RUSER_INFO

    params = {
        self.PUSER: user_id
    }

    response = self._fetch(resource, params)

    return response
python
{ "resource": "" }
q268929
SlackCommand.setup_cmd_parser
test
def setup_cmd_parser(cls): """Returns the Slack argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True, archive=True) # Backend token is required action = parser.parser._option_string_actions['--api-token'] action.required = True # Slack options group = parser.parser.add_argument_group('Slack arguments') group.add_argument('--max-items', dest='max_items', type=int, default=MAX_ITEMS, help="Maximum number of items requested on the same query") # Required arguments parser.parser.add_argument('channel', help="Slack channel identifier") return parser
python
{ "resource": "" }
q268930
Bugzilla.metadata_updated_on
test
def metadata_updated_on(item): """Extracts and coverts the update time from a Bugzilla item. The timestamp is extracted from 'delta_ts' field. This date is converted to UNIX timestamp format. Due Bugzilla servers ignore the timezone on HTTP requests, it will be ignored during the conversion, too. :param item: item generated by the backend :returns: a UNIX timestamp """ ts = item['delta_ts'][0]['__text__'] ts = str_to_datetime(ts) ts = ts.replace(tzinfo=dateutil.tz.tzutc()) return ts.timestamp()
python
{ "resource": "" }
q268931
Bugzilla.parse_buglist
test
def parse_buglist(raw_csv):
    """Parse a Bugzilla CSV bug list.

    The method parses the CSV file and returns an iterator of
    dictionaries. Each one of these contains the summary of a bug.

    :param raw_csv: CSV string to parse

    :returns: a generator of parsed bugs
    """
    reader = csv.DictReader(raw_csv.split('\n'),
                            delimiter=',', quotechar='"')
    for row in reader:
        yield row
python
{ "resource": "" }
q268932
Bugzilla.parse_bugs_details
test
def parse_bugs_details(raw_xml):
    """Parse a Bugzilla bugs details XML stream.

    This method returns a generator which parses the given XML,
    producing an iterator of dictionaries. Each dictionary stores
    the information related to a parsed bug.

    If the given XML is invalid or does not contain any bug, the
    method will raise a ParseError exception.

    :param raw_xml: XML string to parse

    :returns: a generator of parsed bugs

    :raises ParseError: raised when an error occurs parsing
        the given XML stream
    """
    bugs = xml_to_dict(raw_xml)

    if 'bug' not in bugs:
        cause = "No bugs found. XML stream seems to be invalid."
        raise ParseError(cause=cause)

    for bug in bugs['bug']:
        yield bug
python
{ "resource": "" }
q268933
Bugzilla.parse_bug_activity
test
def parse_bug_activity(raw_html):
    """Parse a Bugzilla bug activity HTML stream.

    This method extracts the information about activity from the
    given HTML stream. The bug activity is stored into an HTML
    table. Each parsed activity event is returned into a dictionary.

    If the given HTML is invalid, the method will raise a ParseError
    exception.

    :param raw_html: HTML string to parse

    :returns: a generator of parsed activity events

    :raises ParseError: raised when an error occurs parsing
        the given HTML stream
    """
    def is_activity_empty(bs):
        EMPTY_ACTIVITY = "No changes have been made to this (?:bug|issue) yet."
        tag = bs.find(text=re.compile(EMPTY_ACTIVITY))
        return tag is not None

    def find_activity_table(bs):
        # The first table with 5 columns is the table of activity
        tables = bs.find_all('table')

        for tb in tables:
            nheaders = len(tb.tr.find_all('th', recursive=False))
            if nheaders == 5:
                return tb

        raise ParseError(cause="Table of bug activity not found.")

    def remove_tags(bs):
        HTML_TAGS_TO_REMOVE = ['a', 'i', 'span']

        for tag in bs.find_all(HTML_TAGS_TO_REMOVE):
            tag.replaceWith(tag.text)

    def format_text(bs):
        strings = [s.strip(' \n\t') for s in bs.stripped_strings]
        s = ' '.join(strings)
        return s

    # Parsing starts here
    bs = bs4.BeautifulSoup(raw_html, 'html.parser')

    if is_activity_empty(bs):
        fields = []
    else:
        activity_tb = find_activity_table(bs)
        remove_tags(activity_tb)
        fields = activity_tb.find_all('td')

    while fields:
        # First two fields: 'Who' and 'When'.
        who = fields.pop(0)
        when = fields.pop(0)

        # The attribute 'rowspan' of 'who' field tells how many
        # changes were made on the same date.
        n = int(who.get('rowspan'))

        # Next fields are split into chunks of three elements:
        # 'What', 'Removed' and 'Added'. These chunks share
        # 'Who' and 'When' values.
        for _ in range(n):
            what = fields.pop(0)
            removed = fields.pop(0)
            added = fields.pop(0)
            event = {'Who': format_text(who),
                     'When': format_text(when),
                     'What': format_text(what),
                     'Removed': format_text(removed),
                     'Added': format_text(added)}
            yield event
python
{ "resource": "" }
q268934
BugzillaClient.logout
test
def logout(self):
    """Logout from the server."""
    params = {
        self.PLOGOUT: '1'
    }

    self.call(self.CGI_LOGIN, params)
    self._close_http_session()

    logger.debug("Bugzilla user logged out from %s", self.base_url)
python
{ "resource": "" }
q268935
BugzillaClient.metadata
test
def metadata(self):
    """Get metadata information in XML format."""
    params = {
        self.PCTYPE: self.CTYPE_XML
    }

    response = self.call(self.CGI_BUG, params)

    return response
python
{ "resource": "" }
q268936
BugzillaClient.buglist
test
def buglist(self, from_date=DEFAULT_DATETIME):
    """Get a summary of bugs in CSV format.

    :param from_date: retrieve bugs that were updated from that date
    """
    if not self.version:
        self.version = self.__fetch_version()

    if self.version in self.OLD_STYLE_VERSIONS:
        order = 'Last+Changed'
    else:
        order = 'changeddate'

    date = from_date.strftime("%Y-%m-%d %H:%M:%S")

    params = {
        self.PCHFIELD_FROM: date,
        self.PCTYPE: self.CTYPE_CSV,
        self.PLIMIT: self.max_bugs_csv,
        self.PORDER: order
    }

    response = self.call(self.CGI_BUGLIST, params)

    return response
python
{ "resource": "" }
q268937
BugzillaClient.bugs
test
def bugs(self, *bug_ids):
    """Get the information of a list of bugs in XML format.

    :param bug_ids: list of bug identifiers
    """
    params = {
        self.PBUG_ID: bug_ids,
        self.PCTYPE: self.CTYPE_XML,
        self.PEXCLUDE_FIELD: 'attachmentdata'
    }

    response = self.call(self.CGI_BUG, params)

    return response
python
{ "resource": "" }
q268938
BugzillaClient.bug_activity
test
def bug_activity(self, bug_id):
    """Get the activity of a bug in HTML format.

    :param bug_id: bug identifier
    """
    params = {
        self.PBUG_ID: bug_id
    }

    response = self.call(self.CGI_BUG_ACTIVITY, params)

    return response
python
{ "resource": "" }
q268939
Meetup.fetch
test
def fetch(self, category=CATEGORY_EVENT, from_date=DEFAULT_DATETIME,
          to_date=None, filter_classified=False):
    """Fetch the events from the server.

    This method fetches those events of a group stored on the server
    that were updated since the given date. Comments and rsvps data
    are included within each event.

    :param category: the category of items to fetch
    :param from_date: obtain events updated since this date
    :param to_date: obtain events updated before this date
    :param filter_classified: remove classified fields from the resulting items

    :returns: a generator of events
    """
    if not from_date:
        from_date = DEFAULT_DATETIME

    from_date = datetime_to_utc(from_date)

    kwargs = {"from_date": from_date, "to_date": to_date}
    items = super().fetch(category,
                          filter_classified=filter_classified,
                          **kwargs)

    return items
python
{ "resource": "" }
q268940
Meetup.fetch_items
test
def fetch_items(self, category, **kwargs): """Fetch the events :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] to_date = kwargs['to_date'] logger.info("Fetching events of '%s' group from %s to %s", self.group, str(from_date), str(to_date) if to_date else '--') to_date_ts = datetime_to_utc(to_date).timestamp() if to_date else None nevents = 0 stop_fetching = False ev_pages = self.client.events(self.group, from_date=from_date) for evp in ev_pages: events = [event for event in self.parse_json(evp)] for event in events: event_id = event['id'] event['comments'] = self.__fetch_and_parse_comments(event_id) event['rsvps'] = self.__fetch_and_parse_rsvps(event_id) # Check events updated before 'to_date' event_ts = self.metadata_updated_on(event) if to_date_ts and event_ts >= to_date_ts: stop_fetching = True continue yield event nevents += 1 if stop_fetching: break logger.info("Fetch process completed: %s events fetched", nevents)
python
{ "resource": "" }
q268941
MeetupClient.events
test
def events(self, group, from_date=DEFAULT_DATETIME):
    """Fetch the events pages of a given group."""
    date = datetime_to_utc(from_date)
    date = date.strftime("since:%Y-%m-%dT%H:%M:%S.000Z")

    resource = urijoin(group, self.REVENTS)

    # Hack required because the Meetup API does not support list
    # values with the format `?param=value1&param=value2`.
    # It only works with `?param=value1,value2`.
    # Moreover, urllib3 encodes comma characters when values
    # are given using the params dict, which doesn't work
    # with Meetup either.
    fixed_params = '?' + self.PFIELDS + '=' + ','.join(self.VEVENT_FIELDS)
    fixed_params += '&' + self.PSTATUS + '=' + ','.join(self.VSTATUS)
    resource += fixed_params

    params = {
        self.PORDER: self.VUPDATED,
        self.PSCROLL: date,
        self.PPAGE: self.max_items
    }

    try:
        for page in self._fetch(resource, params):
            yield page
    except requests.exceptions.HTTPError as error:
        if error.response.status_code == 410:
            msg = "Group is no longer accessible: {}".format(error)
            raise RepositoryError(cause=msg)
        else:
            raise error
python
{ "resource": "" }
q268942
MeetupClient.comments
test
def comments(self, group, event_id):
    """Fetch the comments of a given event."""
    resource = urijoin(group, self.REVENTS, event_id, self.RCOMMENTS)

    params = {
        self.PPAGE: self.max_items
    }

    for page in self._fetch(resource, params):
        yield page
python
{ "resource": "" }
q268943
MeetupClient.rsvps
test
def rsvps(self, group, event_id):
    """Fetch the rsvps of a given event."""
    resource = urijoin(group, self.REVENTS, event_id, self.RRSVPS)

    # Same hack as in the 'events' method
    fixed_params = '?' + self.PFIELDS + '=' + ','.join(self.VRSVP_FIELDS)
    fixed_params += '&' + self.PRESPONSE + '=' + ','.join(self.VRESPONSE)
    resource += fixed_params

    params = {
        self.PPAGE: self.max_items
    }

    for page in self._fetch(resource, params):
        yield page
python
{ "resource": "" }
q268944
Askbot.__fetch_question
test
def __fetch_question(self, question):
    """Fetch an Askbot HTML question body.

    The method fetches the HTML pages that contain the body of
    the given question.

    :param question: item with the question itself

    :returns: a list of HTML page/s for the question
    """
    html_question_items = []

    npages = 1
    next_request = True

    while next_request:
        try:
            html_question = self.client.get_html_question(question['id'], npages)
            html_question_items.append(html_question)
            tpages = self.ab_parser.parse_number_of_html_pages(html_question)

            if npages == tpages:
                next_request = False

            npages = npages + 1
        except requests.exceptions.TooManyRedirects as e:
            logger.warning("%s, data not retrieved for question %s",
                           e, question['id'])
            next_request = False

    return html_question_items
python
{ "resource": "" }
q268945
Askbot.__fetch_comments
test
def __fetch_comments(self, question):
    """Fetch all the comments of an Askbot question and answers.

    The method fetches the list of every comment existing in the
    question and its answers.

    :param question: item with the question itself

    :returns: a list of comments with the ids as hashes
    """
    comments = {}

    comments[question['id']] = json.loads(self.client.get_comments(question['id']))

    for object_id in question['answer_ids']:
        comments[object_id] = json.loads(self.client.get_comments(object_id))

    return comments
python
{ "resource": "" }
q268946
Askbot.__build_question
test
def __build_question(html_question, question, comments):
    """Build an Askbot HTML response.

    The method puts together all the information regarding a question.

    :param html_question: array of HTML raw pages
    :param question: question object from the API
    :param comments: list of comments to add

    :returns: a dict item with the parsed question information
    """
    question_object = {}

    # Parse the user info from the soup container
    question_container = AskbotParser.parse_question_container(html_question[0])

    # Add the info to the question object
    question_object.update(question_container)

    # Add the comments of the question (if any)
    if comments[int(question['id'])]:
        question_object['comments'] = comments[int(question['id'])]

    answers = []
    for page in html_question:
        answers.extend(AskbotParser.parse_answers(page))

    if len(answers) != 0:
        question_object['answers'] = answers
        for answer in question_object['answers']:
            if comments[int(answer['id'])]:
                answer['comments'] = comments[int(answer['id'])]

    return question_object
python
{ "resource": "" }
q268947
AskbotClient.get_api_questions
test
def get_api_questions(self, path):
    """Retrieve a question page using the API.

    :param path: path of the API resource to query
    """
    npages = 1
    next_request = True

    path = urijoin(self.base_url, path)

    while next_request:
        try:
            params = {
                'page': npages,
                'sort': self.ORDER_API
            }
            response = self.fetch(path, payload=params)
            whole_page = response.text
            raw_questions = json.loads(whole_page)
            tpages = raw_questions['pages']

            logger.debug("Fetching questions from '%s': page %s/%s",
                         self.base_url, npages, tpages)

            if npages == tpages:
                next_request = False

            npages = npages + 1

            yield raw_questions
        except requests.exceptions.TooManyRedirects as e:
            logger.warning("%s, data not retrieved for resource %s", e, path)
            next_request = False
python
{ "resource": "" }
q268948
AskbotClient.get_html_question
test
def get_html_question(self, question_id, page=1):
    """Retrieve a raw HTML question and all its information.

    :param question_id: question identifier
    :param page: page to retrieve
    """
    path = urijoin(self.base_url, self.HTML_QUESTION, question_id)
    params = {
        'page': page,
        'sort': self.ORDER_HTML
    }
    response = self.fetch(path, payload=params)
    return response.text
python
{ "resource": "" }
q268949
AskbotClient.get_comments
test
def get_comments(self, post_id):
    """Retrieve a list of comments by a given id.

    :param post_id: post identifier
    """
    path = urijoin(self.base_url,
                   self.COMMENTS if self._use_new_urls else self.COMMENTS_OLD)

    params = {
        'post_id': post_id,
        'post_type': 'answer',
        'avatar_size': 0
    }
    headers = {'X-Requested-With': 'XMLHttpRequest'}

    try:
        response = self.fetch(path, payload=params, headers=headers)
        raw = response.text
    except requests.exceptions.HTTPError as ex:
        if ex.response.status_code == 404:
            logger.debug("Comments URL did not work. Using old URL schema.")
            self._use_new_urls = False

            path = urijoin(self.base_url, self.COMMENTS_OLD)
            response = self.fetch(path, payload=params, headers=headers)
            raw = response.text
        elif ex.response.status_code == 500:
            logger.warning("Comments not retrieved due to %s", ex)
            raw = '[]'
        else:
            raise ex

    return raw
python
{ "resource": "" }
q268950
AskbotParser.parse_question_container
test
def parse_question_container(html_question):
    """Parse the question info container of a given HTML question.

    The method parses the information available in the question
    information container. The container can have up to 2 elements:
    the first one contains the information related with the user
    who generated the question and the date (if any). The second one
    contains the date of the update, and the user who updated it
    (if not the same who generated the question).

    :param html_question: raw HTML question element

    :returns: an object with the parsed information
    """
    container_info = {}
    bs_question = bs4.BeautifulSoup(html_question, "html.parser")
    question = AskbotParser._find_question_container(bs_question)
    container = question.select("div.post-update-info")
    created = container[0]
    container_info['author'] = AskbotParser.parse_user_info(created)
    try:
        container[1]
    except IndexError:
        pass
    else:
        updated = container[1]
        if AskbotParser.parse_user_info(updated):
            container_info['updated_by'] = AskbotParser.parse_user_info(updated)
    return container_info
python
{ "resource": "" }
q268951
AskbotParser.parse_answers
test
def parse_answers(html_question):
    """Parse the answers of a given HTML question.

    The method parses the answers related with a given HTML
    question, as well as all the comments related to the answer.

    :param html_question: raw HTML question element

    :returns: a list with the answers
    """
    def parse_answer_container(update_info):
        """Parse the answer info container of a given HTML question.

        The method parses the information available in the answer
        information container. The container can have up to 2 elements:
        the first one contains the information related with the user
        who generated the question and the date (if any). The second
        one contains the date of the update, and the user who updated
        it (if not the same who generated the question).

        :param update_info: beautiful soup update_info container element

        :returns: an object with the parsed information
        """
        container_info = {}
        created = update_info[0]
        answered_at = created.abbr.attrs["title"]

        # Convert date to UNIX timestamp
        container_info['added_at'] = str(str_to_datetime(answered_at).timestamp())
        container_info['answered_by'] = AskbotParser.parse_user_info(created)
        try:
            update_info[1]
        except IndexError:
            pass
        else:
            updated = update_info[1]
            updated_at = updated.abbr.attrs["title"]

            # Convert date to UNIX timestamp
            container_info['updated_at'] = str(str_to_datetime(updated_at).timestamp())
            if AskbotParser.parse_user_info(updated):
                container_info['updated_by'] = AskbotParser.parse_user_info(updated)
        return container_info

    answer_list = []

    # Select all the answers
    bs_question = bs4.BeautifulSoup(html_question, "html.parser")
    bs_answers = bs_question.select("div.answer")
    for bs_answer in bs_answers:
        answer_id = bs_answer.attrs["data-post-id"]
        votes_element = bs_answer.select("div.vote-number")[0].text
        accepted_answer = bs_answer.select("div.answer-img-accept")[0].get('title').endswith("correct")

        # Select the body of the answer
        body = bs_answer.select("div.post-body")

        # Get the user information container and parse it
        update_info = body[0].select("div.post-update-info")
        answer_container = parse_answer_container(update_info)

        # Remove the update-info-container div to be able to get the body
        body[0].div.extract().select("div.post-update-info-container")

        # Override the body with a clean one
        body = body[0].get_text(strip=True)

        # Generate the answer object
        answer = {'id': answer_id,
                  'score': votes_element,
                  'summary': body,
                  'accepted': accepted_answer
                  }

        # Update the object with the information in the answer container
        answer.update(answer_container)
        answer_list.append(answer)

    return answer_list
python
{ "resource": "" }
q268952
AskbotParser.parse_number_of_html_pages
test
def parse_number_of_html_pages(html_question):
    """Parse number of answer pages to paginate over them.

    :param html_question: raw HTML question element

    :returns: an integer with the number of pages
    """
    bs_question = bs4.BeautifulSoup(html_question, "html.parser")

    try:
        bs_question.select('div.paginator')[0]
    except IndexError:
        return 1
    else:
        return int(bs_question.select('div.paginator')[0].attrs['data-num-pages'])
python
{ "resource": "" }
q268953
AskbotParser.parse_user_info
test
def parse_user_info(update_info):
    """Parse the user information of a given HTML container.

    The method parses all the available user information in the
    container. If the class "user-info" exists, the method will get
    all the available information in the container. Otherwise, if a
    class "tip" exists, it is a wiki post with no user associated.
    Else, it can be an empty container.

    :param update_info: beautiful soup answer container element

    :returns: an object with the parsed information
    """
    user_info = {}

    if update_info.select("div.user-info"):
        # Get all the <a> elements in the container. The first <a> contains
        # the user information, the second one (if it exists), the website
        # of the user.
        elements = update_info.select("div.user-info")[0].find_all("a")
        href = elements[0].attrs["href"]
        user_info['id'] = re.search(r'\d+', href).group(0)
        user_info['username'] = elements[0].text
        user_info['reputation'] = update_info.select('span.reputation-score')[0].text
        user_info['badges'] = update_info.select("span.badges")[0].attrs["title"]

        try:
            elements[1]
        except IndexError:
            pass
        else:
            user_info['website'] = elements[1].attrs["href"]

        if update_info.select("img.flag"):
            flag = update_info.select("img.flag")[0].attrs["alt"]
            user_info['country'] = re.sub("flag of ", "", flag)

    return user_info
python
{ "resource": "" }
q268954
Gerrit.fetch_items
test
def fetch_items(self, category, **kwargs): """Fetch the reviews :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] if self.client.version[0] == 2 and self.client.version[1] == 8: fetcher = self._fetch_gerrit28(from_date) else: fetcher = self._fetch_gerrit(from_date) for review in fetcher: yield review
python
{ "resource": "" }
q268955
Gerrit.parse_reviews
test
def parse_reviews(raw_data):
    """Parse a Gerrit reviews list."""
    # Join the isolated JSON reviews into an array for parsing
    items_raw = "[" + raw_data.replace("\n", ",") + "]"
    items_raw = items_raw.replace(",]", "]")
    items = json.loads(items_raw)
    reviews = []
    for item in items:
        if 'project' in item.keys():
            reviews.append(item)
    return reviews
python
{ "resource": "" }
q268956
Gerrit._fetch_gerrit28
test
def _fetch_gerrit28(self, from_date=DEFAULT_DATETIME):
    """Specific fetch for gerrit 2.8 version.

    Get open and closed reviews in different queries.
    Take the newer review from both lists and iterate.
    """
    # Convert date to Unix time
    from_ut = datetime_to_utc(from_date)
    from_ut = from_ut.timestamp()

    filter_open = "status:open"
    filter_closed = "status:closed"

    last_item_open = self.client.next_retrieve_group_item()
    last_item_closed = self.client.next_retrieve_group_item()
    reviews_open = self._get_reviews(last_item_open, filter_open)
    reviews_closed = self._get_reviews(last_item_closed, filter_closed)
    last_nreviews_open = len(reviews_open)
    last_nreviews_closed = len(reviews_closed)

    while reviews_open or reviews_closed:
        if reviews_open and reviews_closed:
            if reviews_open[0]['lastUpdated'] >= reviews_closed[0]['lastUpdated']:
                review_open = reviews_open.pop(0)
                review = review_open
            else:
                review_closed = reviews_closed.pop(0)
                review = review_closed
        elif reviews_closed:
            review_closed = reviews_closed.pop(0)
            review = review_closed
        else:
            review_open = reviews_open.pop(0)
            review = review_open

        updated = review['lastUpdated']
        if updated <= from_ut:
            logger.debug("No more updates for %s", self.hostname)
            break
        else:
            yield review

        if not reviews_open and last_nreviews_open >= self.max_reviews:
            last_item_open = self.client.next_retrieve_group_item(last_item_open, review_open)
            reviews_open = self._get_reviews(last_item_open, filter_open)
            last_nreviews_open = len(reviews_open)
        if not reviews_closed and last_nreviews_closed >= self.max_reviews:
            last_item_closed = self.client.next_retrieve_group_item(last_item_closed, review_closed)
            reviews_closed = self._get_reviews(last_item_closed, filter_closed)
            last_nreviews_closed = len(reviews_closed)
python
{ "resource": "" }
q268957
GerritClient.version
test
def version(self):
    """Return the Gerrit server version."""
    if self._version:
        return self._version

    cmd = self.gerrit_cmd + " %s " % (GerritClient.CMD_VERSION)

    logger.debug("Getting version: %s", cmd)
    raw_data = self.__execute(cmd)
    raw_data = str(raw_data, "UTF-8")
    logger.debug("Gerrit version: %s", raw_data)

    # output: gerrit version 2.10-rc1-988-g333a9dd
    m = re.match(GerritClient.VERSION_REGEX, raw_data)

    if not m:
        cause = "Invalid gerrit version %s" % raw_data
        raise BackendError(cause=cause)

    try:
        major = int(m.group(1))
        minor = int(m.group(2))
    except Exception:
        cause = "Gerrit client could not determine the server version."
        raise BackendError(cause=cause)

    self._version = [major, minor]
    return self._version
python
{ "resource": "" }
q268958
GerritClient.reviews
test
def reviews(self, last_item, filter_=None):
    """Get the reviews starting from last_item."""
    cmd = self._get_gerrit_cmd(last_item, filter_)

    logger.debug("Getting reviews with command: %s", cmd)
    raw_data = self.__execute(cmd)
    raw_data = str(raw_data, "UTF-8")

    return raw_data
python
{ "resource": "" }
q268959
GerritClient.next_retrieve_group_item
test
def next_retrieve_group_item(self, last_item=None, entry=None):
    """Return the item to start from in next reviews group."""
    next_item = None

    gerrit_version = self.version

    if gerrit_version[0] == 2 and gerrit_version[1] > 9:
        if last_item is None:
            next_item = 0
        else:
            next_item = last_item
    elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
        # https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E
        cause = "Gerrit 2.9.0 does not support pagination"
        raise BackendError(cause=cause)
    else:
        if entry is not None:
            next_item = entry['sortKey']

    return next_item
python
{ "resource": "" }
q268960
GerritClient.__execute
test
def __execute(self, cmd):
    """Execute gerrit command"""
    if self.from_archive:
        response = self.__execute_from_archive(cmd)
    else:
        response = self.__execute_from_remote(cmd)

    return response
python
{ "resource": "" }
q268961
GerritClient.__execute_from_archive
test
def __execute_from_archive(self, cmd):
    """Execute gerrit command against the archive"""
    cmd = self.sanitize_for_archive(cmd)
    response = self.archive.retrieve(cmd, None, None)

    if isinstance(response, RuntimeError):
        raise response

    return response
python
{ "resource": "" }
q268962
GerritClient.__execute_from_remote
test
def __execute_from_remote(self, cmd):
    """Execute gerrit command with retry if it fails"""
    result = None  # data result from the cmd execution

    retries = 0
    while retries < self.MAX_RETRIES:
        try:
            result = subprocess.check_output(cmd, shell=True)
            break
        except subprocess.CalledProcessError as ex:
            logger.error("gerrit cmd %s failed: %s", cmd, ex)
            time.sleep(self.RETRY_WAIT * retries)
            retries += 1

    if result is None:
        result = RuntimeError(cmd + " failed " + str(self.MAX_RETRIES) + " times. Giving up!")

    if self.archive:
        cmd = self.sanitize_for_archive(cmd)
        self.archive.store(cmd, None, None, result)

    if isinstance(result, RuntimeError):
        raise result

    return result
python
{ "resource": "" }
q268963
GerritCommand.setup_cmd_parser
test
def setup_cmd_parser(cls): """Returns the Gerrit argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, archive=True) # Gerrit options group = parser.parser.add_argument_group('Gerrit arguments') group.add_argument('--user', dest='user', help="Gerrit ssh user") group.add_argument('--max-reviews', dest='max_reviews', type=int, default=MAX_REVIEWS, help="Max number of reviews per ssh query.") group.add_argument('--blacklist-reviews', dest='blacklist_reviews', nargs='*', help="Wrong reviews that must not be retrieved.") group.add_argument('--disable-host-key-check', dest='disable_host_key_check', action='store_true', help="Don't check remote host identity") group.add_argument('--ssh-port', dest='port', default=PORT, type=int, help="Set SSH port of the Gerrit server") # Required arguments parser.parser.add_argument('hostname', help="Hostname of the Gerrit server") return parser
python
{ "resource": "" }
q268964
Launchpad.__fetch_issue_data
test
def __fetch_issue_data(self, issue_id):
    """Get data associated with an issue"""
    raw_issue = self.client.issue(issue_id)
    issue = json.loads(raw_issue)

    return issue
python
{ "resource": "" }
q268965
Launchpad.__fetch_issue_attachments
test
def __fetch_issue_attachments(self, issue_id):
    """Get attachments of an issue"""
    for attachments_raw in self.client.issue_collection(issue_id, "attachments"):
        attachments = json.loads(attachments_raw)

        for attachment in attachments['entries']:
            yield attachment
python
{ "resource": "" }
q268966
Launchpad.__fetch_issue_messages
test
def __fetch_issue_messages(self, issue_id):
    """Get messages of an issue"""
    for messages_raw in self.client.issue_collection(issue_id, "messages"):
        messages = json.loads(messages_raw)

        for msg in messages['entries']:
            msg['owner_data'] = self.__fetch_user_data('{OWNER}', msg['owner_link'])
            yield msg
python
{ "resource": "" }
q268967
Launchpad.__fetch_issue_activities
test
def __fetch_issue_activities(self, issue_id):
    """Get activities on an issue"""
    for activities_raw in self.client.issue_collection(issue_id, "activity"):
        activities = json.loads(activities_raw)

        for act in activities['entries']:
            act['person_data'] = self.__fetch_user_data('{PERSON}', act['person_link'])
            yield act
python
{ "resource": "" }
q268968
Launchpad.__fetch_user_data
test
def __fetch_user_data(self, tag_type, user_link):
    """Get data associated with a user"""
    user_name = self.client.user_name(user_link)

    user = {}

    if not user_name:
        return user

    user_raw = self.client.user(user_name)
    user = json.loads(user_raw)

    return user
python
{ "resource": "" }
q268969
LaunchpadClient.user
test
def user(self, user_name):
    """Get the user data by URL"""
    user = None

    if user_name in self._users:
        return self._users[user_name]

    url_user = self.__get_url("~" + user_name)

    logger.info("Getting info for %s", url_user)

    try:
        raw_user = self.__send_request(url_user)
        user = raw_user
    except requests.exceptions.HTTPError as e:
        if e.response.status_code in [404, 410]:
            logger.warning("Data is not available - %s", url_user)
            user = '{}'
        else:
            raise e

    self._users[user_name] = user

    return user
python
{ "resource": "" }
q268970
LaunchpadClient.issue
test
def issue(self, issue_id):
    """Get the issue data by its ID"""
    path = urijoin("bugs", str(issue_id))
    url_issue = self.__get_url(path)
    raw_text = self.__send_request(url_issue)

    return raw_text
python
{ "resource": "" }
q268971
LaunchpadClient.issue_collection
test
def issue_collection(self, issue_id, collection_name):
    """Get a collection list of a given issue"""
    path = urijoin("bugs", str(issue_id), collection_name)
    url_collection = self.__get_url(path)

    payload = {
        'ws.size': self.items_per_page,
        'ws.start': 0,
        'order_by': 'date_last_updated'
    }

    raw_items = self.__fetch_items(path=url_collection, payload=payload)

    return raw_items
python
{ "resource": "" }
q268972
LaunchpadClient.__get_url_project
test
def __get_url_project(self):
    """Build URL project"""
    if self.package:
        url = self.__get_url_distribution_package()
    else:
        url = self.__get_url_distribution()

    return url
python
{ "resource": "" }
q268973
LaunchpadClient.__fetch_items
test
def __fetch_items(self, path, payload):
    """Return the items from Launchpad API using pagination"""
    page = 0  # current page
    url_next = path
    fetch_data = True

    while fetch_data:
        logger.debug("Fetching page: %i", page)

        try:
            raw_content = self.__send_request(url_next, payload)
            content = json.loads(raw_content)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code in [410]:
                logger.warning("Data is not available - %s", url_next)
                raw_content = '{"total_size": 0, "start": 0, "entries": []}'
                content = json.loads(raw_content)
            else:
                raise e

        if 'next_collection_link' in content:
            url_next = content['next_collection_link']
            payload = None
        else:
            fetch_data = False

        yield raw_content
        page += 1
python
{ "resource": "" }
q268974
GroupsioClient.subscriptions
test
def subscriptions(self, per_page=PER_PAGE):
    """Fetch the groupsio paginated subscriptions for a given token

    :param per_page: number of subscriptions per page

    :returns: an iterator of subscriptions
    """
    url = urijoin(GROUPSIO_API_URL, self.GET_SUBSCRIPTIONS)

    logger.debug("Get groupsio paginated subscriptions from %s", url)

    keep_fetching = True
    payload = {
        "limit": per_page
    }

    while keep_fetching:
        r = self.__fetch(url, payload)
        response_raw = r.json()

        subscriptions = response_raw['data']
        yield subscriptions

        total_subscriptions = response_raw['total_count']
        logger.debug("Subscriptions: %i/%i",
                     response_raw['end_item'], total_subscriptions)

        payload['page_token'] = response_raw['next_page_token']
        keep_fetching = response_raw['has_more']
python
{ "resource": "" }
q268975
GroupsioClient.__find_group_id
test
def __find_group_id(self):
    """Find the id of a group given its name by iterating on the list of subscriptions"""
    group_subscriptions = self.subscriptions(self.auth)
    for subscriptions in group_subscriptions:
        for sub in subscriptions:
            if sub['group_name'] == self.group_name:
                return sub['group_id']

    msg = "Group id not found for group name %s" % self.group_name
    raise BackendError(cause=msg)
python
{ "resource": "" }
q268976
GroupsioClient.__fetch
test
def __fetch(self, url, payload):
    """Fetch requests from groupsio API"""
    r = requests.get(url, params=payload, auth=self.auth, verify=self.verify)

    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise e

    return r
python
{ "resource": "" }
q268977
GroupsioCommand.setup_cmd_parser
test
def setup_cmd_parser(cls): """Returns the Groupsio argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True) # Backend token is required action = parser.parser._option_string_actions['--api-token'] action.required = True # Optional arguments group = parser.parser.add_argument_group('Groupsio arguments') group.add_argument('--mboxes-path', dest='mboxes_path', help="Path where mbox files will be stored") group.add_argument('--no-verify', dest='verify', action='store_false', help="Value 'True' enable SSL verification") # Required arguments parser.parser.add_argument('group_name', help="Name of the group on Groups.io") return parser
python
{ "resource": "" }
q268978
uuid
test
def uuid(*args):
    """Generate a UUID based on the given parameters.

    The UUID will be the SHA1 of the concatenation of the values
    from the list. The separator between these values is ':'.
    Each value must be a non-empty string, otherwise, the function
    will raise an exception.

    :param *args: list of arguments used to generate the UUID

    :returns: a universal unique identifier

    :raises ValueError: when any of the values is not a string,
        is empty or `None`.
    """
    def check_value(v):
        if not isinstance(v, str):
            raise ValueError("%s value is not a string instance" % str(v))
        elif not v:
            raise ValueError("value cannot be None or empty")
        else:
            return v

    s = ':'.join(map(check_value, args))

    sha1 = hashlib.sha1(s.encode('utf-8', errors='surrogateescape'))
    uuid_sha1 = sha1.hexdigest()

    return uuid_sha1
python
{ "resource": "" }
q268979
fetch
test
def fetch(backend_class, backend_args, category, filter_classified=False, manager=None):
    """Fetch items using the given backend.

    Generator to get items using the given backend class. When
    an archive manager is given, this function will store
    the fetched items in an `Archive`. If an exception is raised,
    this archive will be removed to avoid corrupted archives.

    The parameters needed to initialize the `backend` class and
    get the items are given using `backend_args` dict parameter.

    :param backend_class: backend class to fetch items
    :param backend_args: dict of arguments needed to fetch the items
    :param category: category of the items to retrieve.
        If None, it will use the default backend category
    :param filter_classified: remove classified fields from the resulting items
    :param manager: archive manager needed to store the items

    :returns: a generator of items
    """
    init_args = find_signature_parameters(backend_class.__init__, backend_args)
    archive = manager.create_archive() if manager else None
    init_args['archive'] = archive

    backend = backend_class(**init_args)

    if category:
        backend_args['category'] = category
    if filter_classified:
        backend_args['filter_classified'] = filter_classified

    fetch_args = find_signature_parameters(backend.fetch, backend_args)
    items = backend.fetch(**fetch_args)

    try:
        for item in items:
            yield item
    except Exception as e:
        if manager:
            archive_path = archive.archive_path
            manager.remove_archive(archive_path)
        raise e
python
{ "resource": "" }
q268980
fetch_from_archive
test
def fetch_from_archive(backend_class, backend_args, manager, category, archived_after):
    """Fetch items from an archive manager.

    Generator to get the items of a category (previously fetched
    by the given backend class) from an archive manager. Only those
    items archived after the given date will be returned.

    The parameters needed to initialize `backend` and get the
    items are given using `backend_args` dict parameter.

    :param backend_class: backend class to retrieve items
    :param backend_args: dict of arguments needed to retrieve the items
    :param manager: archive manager where the items will be retrieved
    :param category: category of the items to retrieve
    :param archived_after: return items archived after this date

    :returns: a generator of archived items
    """
    init_args = find_signature_parameters(backend_class.__init__, backend_args)
    backend = backend_class(**init_args)

    filepaths = manager.search(backend.origin,
                               backend.__class__.__name__,
                               category,
                               archived_after)

    for filepath in filepaths:
        backend.archive = Archive(filepath)
        items = backend.fetch_from_archive()

        try:
            for item in items:
                yield item
        except ArchiveError as e:
            logger.warning("Ignoring %s archive due to: %s", filepath, str(e))
python
{ "resource": "" }
q268981
find_backends
test
def find_backends(top_package):
    """Find available backends.

    Look for the Perceval backends and commands under `top_package`
    and its sub-packages. When `top_package` defines a namespace,
    backends under that same namespace will be found too.

    :param top_package: package storing backends

    :returns: a tuple with two dicts: one with `Backend` classes and
        one with `BackendCommand` classes
    """
    candidates = pkgutil.walk_packages(top_package.__path__,
                                       prefix=top_package.__name__ + '.')

    modules = [name for _, name, is_pkg in candidates if not is_pkg]

    return _import_backends(modules)
python
{ "resource": "" }
q268982
Backend.fetch
test
def fetch(self, category, filter_classified=False, **kwargs):
    """Fetch items from the repository.

    The method retrieves items from a repository. To remove
    classified fields from the resulting items, set the parameter
    `filter_classified`.

    Take into account that this parameter is incompatible with
    archiving items. Raw client data are archived before any other
    process. Therefore, classified data are stored within the
    archive. To prevent possible data leaks or security issues when
    users do not need these fields, archiving and filtering are not
    compatible.

    :param category: the category of the items fetched
    :param filter_classified: remove classified fields from the
        resulting items
    :param kwargs: a list of other parameters (e.g., from_date,
        offset, etc. specific for each backend)

    :returns: a generator of items

    :raises BackendError: raised either when the category is not
        valid or when 'filter_classified' and 'archive' are active
        at the same time
    """
    if category not in self.categories:
        cause = "%s category not valid for %s" % (category, self.__class__.__name__)
        raise BackendError(cause=cause)

    if filter_classified and self.archive:
        cause = "classified fields filtering is not compatible with archiving items"
        raise BackendError(cause=cause)

    if self.archive:
        self.archive.init_metadata(self.origin, self.__class__.__name__,
                                   self.version, category, kwargs)

    self.client = self._init_client()

    for item in self.fetch_items(category, **kwargs):
        if filter_classified:
            item = self.filter_classified_data(item)
        yield self.metadata(item, filter_classified=filter_classified)
python
{ "resource": "" }
q268983
Backend.fetch_from_archive
test
def fetch_from_archive(self):
    """Fetch items from an archive.

    It returns the items stored within an archive. If this method is
    called but no archive was provided, it will raise an
    `ArchiveError` exception.

    :returns: a generator of items

    :raises ArchiveError: raised when an error occurs accessing an
        archive
    """
    if not self.archive:
        raise ArchiveError(cause="archive instance was not provided")

    self.client = self._init_client(from_archive=True)

    for item in self.fetch_items(self.archive.category, **self.archive.backend_params):
        yield self.metadata(item)
python
{ "resource": "" }
q268984
Backend.filter_classified_data
test
def filter_classified_data(self, item):
    """Remove classified or confidential data from an item.

    It removes those fields that contain data considered as
    classified. Classified fields are defined in the
    `CLASSIFIED_FIELDS` class attribute.

    :param item: fields will be removed from this item

    :returns: the same item but with confidential data filtered
    """
    item_uuid = uuid(self.origin, self.metadata_id(item))

    logger.debug("Filtering classified data for item %s", item_uuid)

    for cf in self.CLASSIFIED_FIELDS:
        try:
            _remove_key_from_nested_dict(item, cf)
        except KeyError:
            logger.debug("Classified field '%s' not found for item %s; field ignored",
                         '.'.join(cf), item_uuid)

    logger.debug("Classified data filtered for item %s", item_uuid)

    return item
python
{ "resource": "" }
q268985
BackendCommandArgumentParser.parse
test
def parse(self, *args):
    """Parse a list of arguments.

    Parse argument strings needed to run a backend command. The
    result will be an `argparse.Namespace` object populated with the
    values obtained after the validation of the parameters.

    :param args: argument strings

    :result: an object with the parsed values
    """
    parsed_args = self.parser.parse_args(args)

    # Category was not set, remove it
    if parsed_args.category is None:
        delattr(parsed_args, 'category')

    if self._from_date:
        parsed_args.from_date = str_to_datetime(parsed_args.from_date)

    if self._to_date and parsed_args.to_date:
        parsed_args.to_date = str_to_datetime(parsed_args.to_date)

    if self._archive and parsed_args.archived_since:
        parsed_args.archived_since = str_to_datetime(parsed_args.archived_since)

    if self._archive and parsed_args.fetch_archive and parsed_args.no_archive:
        raise AttributeError("fetch-archive and no-archive arguments are not compatible")

    if self._archive and parsed_args.fetch_archive and not parsed_args.category:
        raise AttributeError("fetch-archive needs a category to work with")

    # Set aliases
    for alias, arg in self.aliases.items():
        if (alias not in parsed_args) and (arg in parsed_args):
            value = getattr(parsed_args, arg, None)
            setattr(parsed_args, alias, value)

    return parsed_args
python
{ "resource": "" }
q268986
BackendCommandArgumentParser._set_auth_arguments
test
def _set_auth_arguments(self, basic_auth=True, token_auth=False):
    """Activate authentication arguments parsing"""
    group = self.parser.add_argument_group('authentication arguments')

    if basic_auth:
        group.add_argument('-u', '--backend-user', dest='user',
                           help="backend user")
        group.add_argument('-p', '--backend-password', dest='password',
                           help="backend password")

    if token_auth:
        group.add_argument('-t', '--api-token', dest='api_token',
                           help="backend authentication token / API key")
python
{ "resource": "" }
q268987
BackendCommandArgumentParser._set_archive_arguments
test
def _set_archive_arguments(self):
    """Activate archive arguments parsing"""
    group = self.parser.add_argument_group('archive arguments')
    group.add_argument('--archive-path', dest='archive_path', default=None,
                       help="directory path to the archives")
    group.add_argument('--no-archive', dest='no_archive', action='store_true',
                       help="do not archive data")
    group.add_argument('--fetch-archive', dest='fetch_archive', action='store_true',
                       help="fetch data from the archives")
    group.add_argument('--archived-since', dest='archived_since', default='1970-01-01',
                       help="retrieve items archived since the given date")
python
{ "resource": "" }
q268988
BackendCommandArgumentParser._set_output_arguments
test
def _set_output_arguments(self):
        """Activate output arguments parsing"""
        group = self.parser.add_argument_group('output arguments')
        group.add_argument('-o', '--output', type=argparse.FileType('w'),
                           dest='outfile', default=sys.stdout,
                           help="output file")
        group.add_argument('--json-line', dest='json_line', action='store_true',
                           help="produce a JSON line for each output item")
python
{ "resource": "" }
q268989
BackendCommand.run
test
def run(self):
        """Fetch and write items.

        This method runs the backend to fetch the items from the given
        origin. Items are converted to JSON objects and written to the
        defined output.

        If the `fetch-archive` parameter was given as an argument during
        the initialization of the instance, the items will be retrieved
        using the archive manager.
        """
        backend_args = vars(self.parsed_args)
        category = backend_args.pop('category', None)
        filter_classified = backend_args.pop('filter_classified', False)
        archived_since = backend_args.pop('archived_since', None)

        if self.archive_manager and self.parsed_args.fetch_archive:
            items = fetch_from_archive(self.BACKEND, backend_args,
                                       self.archive_manager,
                                       category,
                                       archived_since)
        else:
            items = fetch(self.BACKEND, backend_args, category,
                          filter_classified=filter_classified,
                          manager=self.archive_manager)

        try:
            for item in items:
                if self.json_line:
                    obj = json.dumps(item, separators=(',', ':'), sort_keys=True)
                else:
                    obj = json.dumps(item, indent=4, sort_keys=True)
                self.outfile.write(obj)
                self.outfile.write('\n')
        except IOError as e:
            raise RuntimeError(str(e))
        except Exception as e:
            raise RuntimeError(str(e))
python
{ "resource": "" }
q268990
BackendCommand._initialize_archive
test
def _initialize_archive(self):
        """Initialize archive based on the parsed parameters"""
        if 'archive_path' not in self.parsed_args:
            manager = None
        elif self.parsed_args.no_archive:
            manager = None
        else:
            if not self.parsed_args.archive_path:
                archive_path = os.path.expanduser(ARCHIVES_DEFAULT_PATH)
            else:
                archive_path = self.parsed_args.archive_path

            manager = ArchiveManager(archive_path)

        self.archive_manager = manager
python
{ "resource": "" }
q268991
MBox.metadata_updated_on
test
def metadata_updated_on(item):
        """Extracts the update time from an MBox item.

        The timestamp used is extracted from the 'Date' field in its
        several forms. This date is converted to UNIX timestamp format.

        :param item: item generated by the backend

        :returns: a UNIX timestamp
        """
        ts = item[MBox.DATE_FIELD]
        ts = str_to_datetime(ts)

        return ts.timestamp()
python
{ "resource": "" }
q268992
MBox.parse_mbox
test
def parse_mbox(filepath):
        """Parse a mbox file.

        This method parses a mbox file and returns an iterator of
        dictionaries. Each one of them contains an email message.

        :param filepath: path of the mbox to parse

        :returns: generator of messages; each message is stored in a
            dictionary of type `requests.structures.CaseInsensitiveDict`
        """
        mbox = _MBox(filepath, create=False)

        for msg in mbox:
            message = message_to_dict(msg)
            yield message
python
{ "resource": "" }
q268993
MBox._fetch_and_parse_messages
test
def _fetch_and_parse_messages(self, mailing_list, from_date):
        """Fetch and parse the messages from a mailing list"""
        from_date = datetime_to_utc(from_date)

        nmsgs, imsgs, tmsgs = (0, 0, 0)

        for mbox in mailing_list.mboxes:
            tmp_path = None

            try:
                tmp_path = self._copy_mbox(mbox)

                for message in self.parse_mbox(tmp_path):
                    tmsgs += 1

                    if not self._validate_message(message):
                        imsgs += 1
                        continue

                    # Ignore those messages sent before the given date
                    dt = str_to_datetime(message[MBox.DATE_FIELD])

                    if dt < from_date:
                        logger.debug("Message %s sent before %s; skipped",
                                     message['unixfrom'], str(from_date))
                        tmsgs -= 1
                        continue

                    # Convert 'CaseInsensitiveDict' to dict
                    message = self._casedict_to_dict(message)

                    nmsgs += 1
                    logger.debug("Message %s parsed", message['unixfrom'])

                    yield message
            except (OSError, EOFError) as e:
                logger.warning("Ignoring %s mbox due to: %s", mbox.filepath, str(e))
            except Exception as e:
                if tmp_path and os.path.exists(tmp_path):
                    os.remove(tmp_path)
                raise e
            finally:
                if tmp_path and os.path.exists(tmp_path):
                    os.remove(tmp_path)

        logger.info("Done. %s/%s messages fetched; %s ignored",
                    nmsgs, tmsgs, imsgs)
python
{ "resource": "" }
q268994
MBox._copy_mbox
test
def _copy_mbox(self, mbox):
        """Copy the contents of a mbox to a temporary file"""
        tmp_path = tempfile.mktemp(prefix='perceval_')

        with mbox.container as f_in:
            with open(tmp_path, mode='wb') as f_out:
                for line in f_in:
                    f_out.write(line)

        return tmp_path
python
{ "resource": "" }
q268995
MBox._validate_message
test
def _validate_message(self, message):
        """Check if the given message has the mandatory fields"""
        # This check is "case insensitive" because we're
        # using 'CaseInsensitiveDict' from requests.structures
        # module to store the contents of a message.
        if self.MESSAGE_ID_FIELD not in message:
            logger.warning("Field 'Message-ID' not found in message %s; ignoring",
                           message['unixfrom'])
            return False

        if not message[self.MESSAGE_ID_FIELD]:
            logger.warning("Field 'Message-ID' is empty in message %s; ignoring",
                           message['unixfrom'])
            return False

        if self.DATE_FIELD not in message:
            logger.warning("Field 'Date' not found in message %s; ignoring",
                           message['unixfrom'])
            return False

        if not message[self.DATE_FIELD]:
            logger.warning("Field 'Date' is empty in message %s; ignoring",
                           message['unixfrom'])
            return False

        try:
            str_to_datetime(message[self.DATE_FIELD])
        except InvalidDateError:
            logger.warning("Invalid date %s in message %s; ignoring",
                           message[self.DATE_FIELD], message['unixfrom'])
            return False

        return True
python
{ "resource": "" }
q268996
MBox._casedict_to_dict
test
def _casedict_to_dict(self, message):
        """Convert a message in CaseInsensitiveDict to dict.

        This method also converts well-known problematic headers,
        such as Message-ID and Date, to a common name.
        """
        message_id = message.pop(self.MESSAGE_ID_FIELD)
        date = message.pop(self.DATE_FIELD)

        msg = {k: v for k, v in message.items()}
        msg[self.MESSAGE_ID_FIELD] = message_id
        msg[self.DATE_FIELD] = date

        return msg
python
{ "resource": "" }
q268997
_MBox.get_message
test
def get_message(self, key):
        """Return a Message representation or raise a KeyError."""
        start, stop = self._lookup(key)
        self._file.seek(start)

        from_line = self._file.readline().replace(mailbox.linesep, b'')

        string = self._file.read(stop - self._file.tell())
        msg = self._message_factory(string.replace(mailbox.linesep, b'\n'))

        try:
            msg.set_from(from_line[5:].decode('ascii'))
            return msg
        except UnicodeDecodeError:
            pass

        try:
            msg.set_from(from_line[5:].decode('utf-8'))
        except UnicodeDecodeError:
            msg.set_from(from_line[5:].decode('iso-8859-1'))

        return msg
python
{ "resource": "" }
q268998
Git.fetch
test
def fetch(self, category=CATEGORY_COMMIT, from_date=DEFAULT_DATETIME, to_date=DEFAULT_LAST_DATETIME,
              branches=None, latest_items=False, no_update=False):
        """Fetch commits.

        The method retrieves from a Git repository or a log file
        a list of commits. Commits are returned in the same order
        they were obtained.

        When `from_date` parameter is given it returns items committed
        since the given date.

        The list of `branches` is a list of strings, with the names of
        the branches to fetch. If the list of branches is empty, no
        commit is fetched. If the list of branches is None, all commits
        for all branches will be fetched.

        The parameter `latest_items` returns only those commits which
        are new since the last time this method was called.

        The parameter `no_update` returns all commits without performing
        an update of the repository before.

        Take into account that `from_date` and `branches` are ignored
        when the commits are fetched from a Git log file or when
        `latest_items` flag is set.

        The class raises a `RepositoryError` exception when an error
        occurs accessing the repository.

        :param category: the category of items to fetch
        :param from_date: obtain commits newer than a specific date
            (inclusive)
        :param to_date: obtain commits older than a specific date
        :param branches: names of branches to fetch from (default: None)
        :param latest_items: sync with the repository to fetch only the
            newest commits
        :param no_update: if enabled, don't update the repo with the latest
            changes

        :returns: a generator of commits
        """
        if not from_date:
            from_date = DEFAULT_DATETIME
        if not to_date:
            to_date = DEFAULT_LAST_DATETIME

        kwargs = {
            'from_date': from_date,
            'to_date': to_date,
            'branches': branches,
            'latest_items': latest_items,
            'no_update': no_update
        }
        items = super().fetch(category, **kwargs)

        return items
python
{ "resource": "" }
q268999
Git.fetch_items
test
def fetch_items(self, category, **kwargs):
        """Fetch the commits

        :param category: the category of items to fetch
        :param kwargs: backend arguments

        :returns: a generator of items
        """
        from_date = kwargs['from_date']
        to_date = kwargs['to_date']
        branches = kwargs['branches']
        latest_items = kwargs['latest_items']
        no_update = kwargs['no_update']

        ncommits = 0

        try:
            if os.path.isfile(self.gitpath):
                commits = self.__fetch_from_log()
            else:
                commits = self.__fetch_from_repo(from_date, to_date, branches,
                                                 latest_items, no_update)

            for commit in commits:
                yield commit
                ncommits += 1
        except EmptyRepositoryError:
            pass

        logger.info("Fetch process completed: %s commits fetched", ncommits)
python
{ "resource": "" }