Build a request from the given soup form.

Args:
    host (str): The URL of the current queue item.
    soup (obj): The BeautifulSoup form.

Returns:
    :class:`nyawc.http.Request`: The new Request.

def __get_request(self, host, soup):
    url = URLHelper.make_absolute(host, self.__trim_grave_accent(soup["action"])) if soup.has_attr("action") else host
    method_original = soup["method"] if soup.has_attr("method") else "get"
    method = "post" if method_original.lower() == "post" else "get"
    data = self.__get_form_data(soup)
    return Request(url, method, data)

Trim grave accents manually (because BeautifulSoup doesn't support it).

Args:
    href (str): The BeautifulSoup href value.

Returns:
    str: The BeautifulSoup href value without grave accents.

def __trim_grave_accent(self, href):
    if href.startswith("`"):
        href = href[1:]
    if href.endswith("`"):
        href = href[:-1]
    return href

Build a form data dict from the given form.

Args:
    soup (obj): The BeautifulSoup form.

Returns:
    obj: The form data (key/value).

def __get_form_data(self, soup):
    elements = self.__get_valid_form_data_elements(soup)
    form_data = self.__get_default_form_data_input(elements)
    callback = self.options.callbacks.form_before_autofill
    action = callback(self.queue_item, elements, form_data)
    if action == CrawlerActions.DO_AUTOFILL_FORM:
        self.__autofill_form_data(form_data, elements)
    return form_data

Get all valid form input elements.

Note:
    An element is valid when the value can be updated client-side and the element has a name attribute.

Args:
    soup (obj): The BeautifulSoup form.

Returns:
    list(obj): Soup elements.

def __get_valid_form_data_elements(self, soup):
    elements = []
    for element in soup.find_all(["input", "button", "textarea", "select"]):
        if element.has_attr("name"):
            elements.append(element)
    return elements

Get the default form data {key: value} for the given elements.

Args:
    elements (list(obj)): Soup elements.

Returns:
    obj: The {key: value} form data.

def __get_default_form_data_input(self, elements):
    form_data = OrderedDict()
    for element in elements:
        default_value = self.__get_default_value_from_element(element)
        if default_value is False:
            continue
        form_data[element["name"]] = default_value
    return form_data

Autofill empty form data with random data.

Args:
    form_data (obj): The {key: value} form data.
    elements (list(obj)): Soup elements.

Returns:
    obj: The {key: value} form data.

def __autofill_form_data(self, form_data, elements):
    for element in elements:
        if element["name"] not in form_data:
            continue
        if len(form_data[element["name"]]) != 0:
            continue
        if element.name == "textarea":
            form_data[element["name"]] = RandomInputHelper.get_for_type("textarea")
            continue
        if element.has_attr("type"):
            form_data[element["name"]] = RandomInputHelper.get_for_type(element["type"])

Get the default value of a form element.

Args:
    element (obj): The soup element.

Returns:
    str: The default value.

def __get_default_value_from_element(self, element):
    if element.name == "select":
        options = element.find_all("option")
        is_multiple = element.has_attr("multiple")
        selected_options = [
            option for option in options
            if option.has_attr("selected")
        ]
        if not selected_options and options:
            selected_options = [options[0]]
        selected_values = []
        if is_multiple:
            for option in selected_options:
                value = option["value"] if option.has_attr("value") else option.string
                selected_values.append(value)
            return selected_values
        elif len(selected_options) >= 1:
            if selected_options[0].has_attr("value"):
                return selected_options[0]["value"]
            else:
                return selected_options[0].string
        return ""
    if element.name == "textarea":
        return element.string if element.string is not None else ""
    if element.name == "input" and element.has_attr("type"):
        if element["type"] in ("checkbox", "radio"):
            if not element.has_attr("checked"):
                return False
            if element.has_attr("value"):
                return element["value"]
            else:
                return "on"
    if element.has_attr("value"):
        return element["value"]
    return ""

Make the given (relative) URL absolute.

Args:
    base (str): The absolute URL the relative URL was found on.
    relative (str): The (possibly relative) URL to make absolute.

Returns:
    str: The absolute URL.

def make_absolute(base, relative):
    # Python 3.4 and lower do not remove folder traversal strings.
    # This was fixed in 3.5 (https://docs.python.org/3/whatsnew/3.5.html#urllib)
    while relative.startswith('/../') or relative.startswith('../'):
        relative = relative[3:]
    base_parsed = urlparse(base)
    new_path = base_parsed.path.rsplit('/', 1)[0]
    base_parsed = base_parsed._replace(path=new_path)
    base = base_parsed.geturl()
    return urljoin(base, relative)

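A few worked resolutions may help; this sketch restates the helper so it runs standalone, and the example URLs are purely illustrative. Note that the helper drops the base's last path segment before joining, so a relative name resolves against the parent directory.

from urllib.parse import urljoin, urlparse

def make_absolute(base, relative):
    # Restates the helper above so this snippet runs on its own.
    while relative.startswith('/../') or relative.startswith('../'):
        relative = relative[3:]
    parsed = urlparse(base)
    parsed = parsed._replace(path=parsed.path.rsplit('/', 1)[0])
    return urljoin(parsed.geturl(), relative)

assert make_absolute("http://example.com/a/b", "c") == "http://example.com/c"
assert make_absolute("http://example.com/a/b/", "c") == "http://example.com/a/c"
assert make_absolute("http://example.com/a/b", "../c") == "http://example.com/c"
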
Append the given URL with the given data OrderedDict.

Args:
    url (str): The URL to append.
    data (obj): The key/value OrderedDict to append to the URL.

Returns:
    str: The new URL.

def append_with_data(url, data):
    if data is None:
        return url
    url_parts = list(urlparse(url))
    query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True))
    query.update(data)
    url_parts[4] = URLHelper.query_dict_to_string(query)
    return urlunparse(url_parts)

Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it.

Args:
    url (str): The URL to check.

Returns:
    bool: True if parsable, False otherwise.

def is_parsable(url):
    try:
        parsed = urlparse(url)
        URLHelper.__cache[url] = parsed
        return True
    except Exception:
        return False

Get the protocol (e.g. http, https or ftp) of the given URL.

Args:
    url (str): The URL to get the protocol from.

Returns:
    str: The URL protocol.

def get_protocol(url):
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    return URLHelper.__cache[url].scheme

Get the subdomain of the given URL.

Args:
    url (str): The URL to get the subdomain from.

Returns:
    str: The subdomain(s).

def get_subdomain(url):
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    return ".".join(URLHelper.__cache[url].netloc.split(".")[:-2])

Get the hostname of the given URL.

Args:
    url (str): The URL to get the hostname from.

Returns:
    str: The hostname.

def get_hostname(url):
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    parts = URLHelper.__cache[url].netloc.split(".")
    if len(parts) == 1:
        return parts[0]
    else:
        return ".".join(parts[-2:-1])

Get the TLD of the given URL.

Args:
    url (str): The URL to get the TLD from.

Returns:
    str: The TLD.

def get_tld(url):
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    parts = URLHelper.__cache[url].netloc.split(".")
    if len(parts) == 1:
        return ""
    else:
        return parts[-1]

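The three helpers above all share the same naive dot-splitting of the netloc. A standalone illustration (the URL is made up; note that multi-label suffixes like 'co.uk' would be misattributed by this scheme):

from urllib.parse import urlparse

parts = urlparse("https://www.example.com/page/23").netloc.split(".")
print(".".join(parts[:-2]))    # subdomain: 'www'
print(".".join(parts[-2:-1]))  # hostname:  'example'
print(parts[-1])               # tld:       'com'
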
Get the path (e.g. /page/23) of the given URL.

Args:
    url (str): The URL to get the path from.

Returns:
    str: The path.

def get_path(url):
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    return URLHelper.__cache[url].path

Get the query parameters of the given URL in alphabetical order.

Args:
    url (str): The URL to get the query parameters from.

Returns:
    OrderedDict: The query parameters.

def get_ordered_params(url):
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)
    return OrderedDict(sorted(params.items()))

Convert an OrderedDict to a query string.

Args:
    query (obj): The key/value object with query params.

Returns:
    str: The query string.

Note:
    This method does the same as urllib.parse.urlencode except that it doesn't actually encode the values.

def query_dict_to_string(query):
    query_params = []
    for key, value in query.items():
        query_params.append(key + "=" + value)
    return "&".join(query_params)

Convert a string to a query dict.

Args:
    query (str): The query string.

Returns:
    obj: The key/value object with query params.

Note:
    This method does the same as urllib.parse.parse_qsl except that it doesn't actually decode the values.

def query_string_to_dict(query):
    query_params = {}
    for key_value in query.split("&"):
        key_value_pair = key_value.split("=", 1)
        key = key_value_pair[0] if len(key_value_pair) >= 1 else ""
        value = key_value_pair[1] if len(key_value_pair) == 2 else ""
        query_params[key] = value
    return query_params

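Calling the two helpers above as plain functions (in nyawc they are static methods on URLHelper), a round trip preserves percent-encoding untouched, which is the point of not using urlencode/parse_qsl:

from collections import OrderedDict

params = query_string_to_dict("a=1&b=%20&c")
print(params)  # {'a': '1', 'b': '%20', 'c': ''} -- '%20' stays encoded
print(query_dict_to_string(OrderedDict(sorted(params.items()))))  # a=1&b=%20&c=
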
Convert the given GitHub RST contents to PyPi RST contents (since some RST directives are not available in PyPi).

Args:
    contents (str): The GitHub compatible RST contents.

Returns:
    str: The PyPi compatible RST contents.

def rst_to_pypi(contents):
    # The PyPi description does not support the SVG file type.
    contents = contents.replace(".svg?pypi=png.from.svg", ".png")

    # Convert ``<br class="title">`` to an H1 title.
    asterisks_length = len(PackageHelper.get_name())
    asterisks = "*" * asterisks_length
    title = asterisks + "\n" + PackageHelper.get_name() + "\n" + asterisks
    contents = re.sub(r"(\.\. raw\:\: html\n)(\n {2,4})(\<br class=\"title\"\>)", title, contents)

    # The PyPi description does not support raw HTML.
    contents = re.sub(r"(\.\. raw\:\: html\n)((\n {2,4})([A-Za-z0-9<>\ =\"\/])*)*", "", contents)

    return contents

Constructs a crawler thread instance.

Args:
    callback (obj): The method to call when finished.
    callback_lock (bool): The callback lock that prevents race conditions.
    options (:class:`nyawc.Options`): The settings/options object.
    queue_item (:class:`nyawc.QueueItem`): The queue item containing a request to execute.

def __init__(self, callback, callback_lock, options, queue_item):
    threading.Thread.__init__(self)
    self.__callback = callback
    self.__callback_lock = callback_lock
    self.__options = options
    self.__queue_item = queue_item

Constructs a Crawler instance.

Args:
    options (:class:`nyawc.Options`): The options to use for the current crawling runtime.

def __init__(self, options):
    self.queue = Queue(options)
    self.routing = Routing(options)
    self.__options = options
    self.__should_spawn_new_requests = False
    self.__should_stop = False
    self.__stopping = False
    self.__stopped = False
    self.__threads = {}
    self.__lock = threading.Lock()
    signal.signal(signal.SIGINT, self.__signal_handler)
    DebugHelper.setup(self.__options)

Start the crawler using the given request.

Args:
    request (:class:`nyawc.http.Request`): The start point for the crawler.

def start_with(self, request):
    HTTPRequestHelper.patch_with_options(request, self.__options)
    self.queue.add_request(request)
    self.__crawler_start()

Execute the request in the given queue item.

Args:
    queue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape.

def __request_start(self, queue_item):
    try:
        action = self.__options.callbacks.request_before_start(self.queue, queue_item)
    except Exception as e:
        action = None
        print(e)
        print(traceback.format_exc())

    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True

    if action == CrawlerActions.DO_SKIP_TO_NEXT:
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
        self.__should_spawn_new_requests = True

    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS)
        thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item)
        self.__threads[queue_item.get_hash()] = thread
        thread.daemon = True
        thread.start()

Called when the crawler finished the given queue item.

Args:
    queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
    new_requests (list(:class:`nyawc.http.Request`)): All the requests that were found during this request.
    request_failed (bool): True if the request failed (and needs to be moved to errored).

def __request_finish(self, queue_item, new_requests, request_failed=False):
    if self.__stopping:
        return

    del self.__threads[queue_item.get_hash()]

    if request_failed:
        new_queue_items = []
        self.queue.move(queue_item, QueueItem.STATUS_ERRORED)
    else:
        self.routing.increase_route_count(queue_item.request)
        new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)

    try:
        action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)
    except Exception as e:
        action = None
        print(e)
        print(traceback.format_exc())

    queue_item.decompose()

    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True

    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        self.__should_spawn_new_requests = True

Convert the scraped requests to queue items, return them and also add them to the queue.

Args:
    queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
    new_requests (list(:class:`nyawc.http.Request`)): All the requests that were found during this request.

Returns:
    list(:class:`nyawc.QueueItem`): The new queue items.

def __add_scraped_requests_to_queue(self, queue_item, scraped_requests):
    new_queue_items = []
    for scraped_request in scraped_requests:
        HTTPRequestHelper.patch_with_options(scraped_request, self.__options, queue_item)
        if not HTTPRequestHelper.complies_with_scope(queue_item, scraped_request, self.__options.scope):
            continue
        if self.queue.has_request(scraped_request):
            continue
        scraped_request.depth = queue_item.request.depth + 1
        if self.__options.scope.max_depth is not None:
            if scraped_request.depth > self.__options.scope.max_depth:
                continue
        new_queue_item = self.queue.add_request(scraped_request)
        new_queue_items.append(new_queue_item)
    return new_queue_items

Construct the HTMLSoupLinkScraper instance.

Args:
    options (:class:`nyawc.Options`): The settings/options object.
    queue_item (:class:`nyawc.QueueItem`): The queue item containing a response to scrape.

def __init__(self, options, queue_item):
    self.options = options
    self.queue_item = queue_item

Initialize debug/logging in third party libraries correctly.

Args:
    options (:class:`nyawc.Options`): The options to use for the current crawling runtime.

def setup(options):
    if not options.misc.debug:
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecureRequestWarning
        )

Creates a plot that compares the results of different choices for nvals for the function hurst_rs.

Args:
    data (array-like of float): the input data from which the hurst exponent should be estimated

Kwargs:
    nvals (array of int): a manually selected value for the nvals parameter that should be plotted in comparison to the default choices

def hurst_compare_nvals(data, nvals=None):
    import matplotlib.pyplot as plt
    data = np.asarray(data)
    n_all = np.arange(2, len(data) + 1)
    dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly")
    dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly")
    n_def = np.round(np.exp(dd_def[1][0])).astype("int32")
    n_div = n_all[np.where(len(data) % n_all[:-1] == 0)]
    dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly")

    def corr(nvals):
        return [np.log(nolds.expected_rs(n)) for n in nvals]

    l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o")
    l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o")
    l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o")
    l_cst = []
    t_cst = []
    if nvals is not None:
        dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly")
        l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o")
        t_cst = ["custom"]
    plt.xlabel("log(n)")
    plt.ylabel("log((R/S)_n - E[(R/S)_n])")
    plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst)
    labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"])
    for data, label in labeled_data:
        print("%s: %.3f" % (label, data))
    if nvals is not None:
        print("custom: %.3f" % dd_cst[0])
    plt.show()

Generates fractional brownian motions of desired length.

Author:
    Christian Thomae

References:
    .. [fbm_1] https://en.wikipedia.org/wiki/Fractional_Brownian_motion#Method_1_of_simulation

Args:
    n (int): length of sequence to generate

Kwargs:
    H (float): hurst parameter

Returns:
    array of float: simulated fractional brownian motion

def fbm(n, H=0.75):
    # TODO more detailed description of fbm
    assert H > 0 and H < 1

    def R(t, s):
        twoH = 2 * H
        return 0.5 * (s**twoH + t**twoH - np.abs(t - s)**twoH)

    # form the matrix tau by applying R to every element in the grid
    gamma = R(*np.mgrid[0:n, 0:n])
    w, P = np.linalg.eigh(gamma)
    L = np.diag(w)
    sigma = np.dot(np.dot(P, np.sqrt(L)), np.linalg.inv(P))
    v = np.random.randn(n)
    return np.dot(sigma, v)

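As a rough sanity check (a sketch, assuming nolds is installed and fbm is defined as above): the Hurst exponent estimated from the increments of the simulated motion should land near the H used for simulation.

import numpy as np
import nolds

path = fbm(1000, H=0.75)      # simulated fractional Brownian motion
noise = np.diff(path)         # its increments: fractional Gaussian noise
print(nolds.hurst_rs(noise))  # should be roughly 0.75 (stochastic, not exact)
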
Creates an array of n true random numbers obtained from the quantum random number generator at qrng.anu.edu.au.

This function requires the package quantumrandom and an internet connection.

Args:
    n (int): length of the random array

Returns:
    array of ints: array of truly random unsigned 16 bit int values

def qrandom(n):
    import quantumrandom
    return np.concatenate([
        quantumrandom.get_data(data_type='uint16', array_length=1024)
        for i in range(int(np.ceil(n / 1024.0)))
    ])[:n]

Perform a time-delay embedding of a time series.

Args:
    data (array-like): the data that should be embedded
    emb_dim (int): the embedding dimension

Kwargs:
    lag (int): the lag between elements in the embedded vectors

Returns:
    emb_dim x m array: matrix of embedded vectors of the form
    [data[i], data[i+lag], data[i+2*lag], ... data[i+(emb_dim-1)*lag]]
    for i in 0 to m-1 (m = len(data)-(emb_dim-1)*lag)

def delay_embedding(data, emb_dim, lag=1):
    data = np.asarray(data)
    min_len = (emb_dim - 1) * lag + 1
    if len(data) < min_len:
        msg = "cannot embed data of length {} with embedding dimension {} " \
              + "and lag {}, minimum required length is {}"
        raise ValueError(msg.format(len(data), emb_dim, lag, min_len))
    m = len(data) - min_len + 1
    indices = np.repeat([np.arange(emb_dim) * lag], m, axis=0)
    indices += np.arange(m).reshape((m, 1))
    return data[indices]

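A tiny worked example of the indexing, using the function above: with six samples, emb_dim=3 and lag=2, m = 6 - (3-1)*2 - 1 + 1 = 2 vectors are produced.

import numpy as np

print(delay_embedding(np.arange(6), emb_dim=3, lag=2))
# [[0 2 4]
#  [1 3 5]]
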
Creates a list of values by successively halving the total length total_N until the resulting value is less than min_n. Non-integer results are rounded down.

Args:
    total_N (int): total length

Kwargs:
    min_n (int): minimal length after division

Returns:
    list of integers: total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n

def binary_n(total_N, min_n=50):
    max_exp = np.log2(1.0 * total_N / min_n)
    max_exp = int(np.floor(max_exp))
    return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)]

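For instance (assuming numpy is imported as np, as the function expects): with total_N=1000 and min_n=50, floor(log2(1000/50)) = 4, so four halvings are produced.

print(binary_n(1000, min_n=50))  # [500, 250, 125, 62]
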
Creates a list of values by successively multiplying a minimum value min_n by a factor > 1 until a maximum value max_n is reached.

Args:
    min_n (float): minimum value (must be < max_n)
    max_n (float): maximum value (must be > min_n)
    factor (float): factor used to increase min_n (must be > 1)

Returns:
    list of floats: min_n, min_n * factor, min_n * factor^2, ... min_n * factor^i < max_n

def logarithmic_r(min_n, max_n, factor):
    assert max_n > min_n
    assert factor > 1
    max_i = int(np.floor(np.log(1.0 * max_n / min_n) / np.log(factor)))
    return [min_n * (factor ** i) for i in range(max_i + 1)]

Calculates the expected (R/S)_n for white noise for a given n.

This is used as a correction factor in the function hurst_rs. It uses the formula of Anis-Lloyd-Peters (see [h_3]_).

Args:
    n (int): the value of n for which the expected (R/S)_n should be calculated

Returns:
    float: expected (R/S)_n for white noise

def expected_rs(n):
    front = (n - 0.5) / n
    i = np.arange(1, n)
    back = np.sum(np.sqrt((n - i) / i))
    if n <= 340:
        middle = math.gamma((n - 1) * 0.5) / math.sqrt(math.pi) / math.gamma(n * 0.5)
    else:
        middle = 1.0 / math.sqrt(n * math.pi * 0.5)
    return front * middle * back

Uses expected_rs to calculate the expected value for the Hurst exponent h based on the values of n used for the calculation.

Args:
    nvals (iterable of int): the values of n used to calculate the individual (R/S)_n

Kwargs:
    fit (str): the fitting method to use for the line fit, either 'poly' for normal least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which is more robust to outliers

Returns:
    float: expected h for white noise

def expected_h(nvals, fit="RANSAC"):
    rsvals = [expected_rs(n) for n in nvals]
    poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
    return poly[0]

Init.

Args:
    retry_params: a RetryParams instance.
    retriable_exceptions: a list of exception classes that are retriable.
    should_retry: a function that takes a result from the tasklet and returns a boolean. True if the result should be retried.

def __init__(self,
             retry_params,
             retriable_exceptions=_RETRIABLE_EXCEPTIONS,
             should_retry=lambda r: False):
    self.retry_params = retry_params
    self.retriable_exceptions = retriable_exceptions
    self.should_retry = should_retry

Run a tasklet with retry.

The retry should be transparent to the caller: if no results are successful, the exception or result from the last retry is returned to the caller.

Args:
    tasklet: the tasklet to run.
    **kwds: keyword arguments to run the tasklet.

Raises:
    The exception from running the tasklet.

Returns:
    The result from running the tasklet.

def run(self, tasklet, **kwds):
    start_time = time.time()
    n = 1

    while True:
        e = None
        result = None
        got_result = False

        try:
            result = yield tasklet(**kwds)
            got_result = True
            if not self.should_retry(result):
                raise ndb.Return(result)
        except runtime.DeadlineExceededError:
            logging.debug(
                'Tasklet has exceeded request deadline after %s seconds total',
                time.time() - start_time)
            raise
        except self.retriable_exceptions as e:
            pass

        if n == 1:
            logging.debug('Tasklet is %r', tasklet)

        delay = self.retry_params.delay(n, start_time)

        if delay <= 0:
            logging.debug(
                'Tasklet failed after %s attempts and %s seconds in total',
                n, time.time() - start_time)
            if got_result:
                raise ndb.Return(result)
            elif e is not None:
                raise e
            else:
                assert False, 'Should never reach here.'

        if got_result:
            logging.debug('Got result %r from tasklet.', result)
        else:
            logging.debug('Got exception "%r" from tasklet.', e)
        logging.debug('Retry in %s seconds.', delay)
        n += 1
        yield tasklets.sleep(delay)

Check init arguments.

Args:
    name: name of the argument. For logging purpose.
    val: value. Value has to be a non-negative number.
    can_be_zero: whether value can be zero.
    val_type: Python type of the value.

Returns:
    The value.

Raises:
    ValueError: when invalid value is passed in.
    TypeError: when invalid value type is passed in.

def _check(cls, name, val, can_be_zero=False, val_type=float):
    valid_types = [val_type]
    if val_type is float:
        valid_types.append(int)
    if type(val) not in valid_types:
        raise TypeError(
            'Expect type %s for parameter %s' % (val_type.__name__, name))
    if val < 0:
        raise ValueError(
            'Value for parameter %s has to be greater than 0' % name)
    if not can_be_zero and val == 0:
        raise ValueError(
            'Value for parameter %s can not be 0' % name)
    return val

Calculate delay before the next retry.

Args:
    n: the number of current attempt. The first attempt should be 1.
    start_time: the time when retry started in unix time.

Returns:
    Number of seconds to wait before next retry. -1 if retry should give up.

def delay(self, n, start_time):
    if (n > self.max_retries or
            (n > self.min_retries and
             time.time() - start_time > self.max_retry_period)):
        return -1
    return min(
        math.pow(self.backoff_factor, n - 1) * self.initial_delay,
        self.max_delay)

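To make the exponential backoff concrete, here is the delay formula evaluated standalone (the parameter values are hypothetical; your RetryParams defaults may differ):

import math

backoff_factor, initial_delay, max_delay = 2.0, 0.1, 10.0
for n in range(1, 9):
    print(n, min(math.pow(backoff_factor, n - 1) * initial_delay, max_delay))
# attempt 1 -> 0.1s, 2 -> 0.2s, 3 -> 0.4s, ... attempt 8 -> 10.0s (capped by max_delay)
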
Delete a Google Cloud Storage file.

Args:
    filename: A Google Cloud Storage filename of form '/bucket/filename'.
    retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used.
    _account_id: Internal-use only.

Raises:
    errors.NotFoundError: if the file doesn't exist prior to deletion.

def delete(filename, retry_params=None, _account_id=None):
    api = storage_api._get_storage_api(retry_params=retry_params,
                                       account_id=_account_id)
    common.validate_file_path(filename)
    filename = api_utils._quote_filename(filename)
    status, resp_headers, content = api.delete_object(filename)
    errors.check_status(status, [204], filename, resp_headers=resp_headers,
                        body=content)

Returns the location for the given bucket.

https://cloud.google.com/storage/docs/bucket-locations

Args:
    bucket: A Google Cloud Storage bucket of form '/bucket'.
    retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used.
    _account_id: Internal-use only.

Returns:
    The location as a string.

Raises:
    errors.AuthorizationError: if authorization failed.
    errors.NotFoundError: if the bucket does not exist.

def get_location(bucket, retry_params=None, _account_id=None):
    return _get_bucket_attribute(bucket,
                                 'location',
                                 'LocationConstraint',
                                 retry_params=retry_params,
                                 _account_id=_account_id)

Returns the storage class for the given bucket.

https://cloud.google.com/storage/docs/storage-classes

Args:
    bucket: A Google Cloud Storage bucket of form '/bucket'.
    retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used.
    _account_id: Internal-use only.

Returns:
    The storage class as a string.

Raises:
    errors.AuthorizationError: if authorization failed.
    errors.NotFoundError: if the bucket does not exist.

def get_storage_class(bucket, retry_params=None, _account_id=None):
    return _get_bucket_attribute(bucket,
                                 'storageClass',
                                 'StorageClass',
                                 retry_params=retry_params,
                                 _account_id=_account_id)

Get GCSFileStat of a Google Cloud storage file.

Args:
    filename: A Google Cloud Storage filename of form '/bucket/filename'.
    retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used.
    _account_id: Internal-use only.

Returns:
    a GCSFileStat object containing info about this file.

Raises:
    errors.AuthorizationError: if authorization failed.
    errors.NotFoundError: if an object that's expected to exist doesn't.

def stat(filename, retry_params=None, _account_id=None):
    common.validate_file_path(filename)
    api = storage_api._get_storage_api(retry_params=retry_params,
                                       account_id=_account_id)
    status, headers, content = api.head_object(
        api_utils._quote_filename(filename))
    errors.check_status(status, [200], filename, resp_headers=headers,
                        body=content)
    file_stat = common.GCSFileStat(
        filename=filename,
        st_size=common.get_stored_content_length(headers),
        st_ctime=common.http_time_to_posix(headers.get('last-modified')),
        etag=headers.get('etag'),
        content_type=headers.get('content-type'),
        metadata=common.get_metadata(headers))
    return file_stat

Copy the file content from src to dst.

Args:
    src: /bucket/filename
    dst: /bucket/filename
    metadata: a dict of metadata for this copy. If None, old metadata is copied. For example, {'x-goog-meta-foo': 'bar'}.
    retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used.

Raises:
    errors.AuthorizationError: if authorization failed.
    errors.NotFoundError: if an object that's expected to exist doesn't.

def copy2(src, dst, metadata=None, retry_params=None):
    common.validate_file_path(src)
    common.validate_file_path(dst)

    if metadata is None:
        metadata = {}
        copy_meta = 'COPY'
    else:
        copy_meta = 'REPLACE'
    metadata.update({'x-goog-copy-source': src,
                     'x-goog-metadata-directive': copy_meta})

    api = storage_api._get_storage_api(retry_params=retry_params)
    status, resp_headers, content = api.put_object(
        api_utils._quote_filename(dst), headers=metadata)
    errors.check_status(status, [200], src, metadata, resp_headers,
                        body=content)

Initialize.

Args:
    api: storage_api instance.
    path: bucket path of form '/bucket'.
    options: a dict of listbucket options. Please see listbucket doc.

def __init__(self, api, path, options):
    self._init(api, path, options)

Generator for next file element in the document.

Args:
    root: root element of the XML tree.

Yields:
    GCSFileStat for the next file.

def _next_file_gen(self, root):
    for e in root.getiterator(common._T_CONTENTS):
        st_ctime, size, etag, key = None, None, None, None
        for child in e.getiterator('*'):
            if child.tag == common._T_LAST_MODIFIED:
                st_ctime = common.dt_str_to_posix(child.text)
            elif child.tag == common._T_ETAG:
                etag = child.text
            elif child.tag == common._T_SIZE:
                size = child.text
            elif child.tag == common._T_KEY:
                key = child.text
        yield common.GCSFileStat(self._path + '/' + key,
                                 size, etag, st_ctime)
        e.clear()
    yield None

Generator for next directory element in the document.

Args:
    root: root element in the XML tree.

Yields:
    GCSFileStat for the next directory.

def _next_dir_gen(self, root):
    for e in root.getiterator(common._T_COMMON_PREFIXES):
        yield common.GCSFileStat(
            self._path + '/' + e.find(common._T_PREFIX).text,
            st_size=None, etag=None, st_ctime=None, is_dir=True)
        e.clear()
    yield None

Whether to issue another GET bucket call.

Args:
    content: response XML.

Returns:
    True if another call should be issued; in that case self._options is also updated for the next request. False otherwise.

def _should_get_another_batch(self, content):
    if ('max-keys' in self._options and
            self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT):
        return False

    elements = self._find_elements(
        content, set([common._T_IS_TRUNCATED, common._T_NEXT_MARKER]))
    if elements.get(common._T_IS_TRUNCATED, 'false').lower() != 'true':
        return False

    next_marker = elements.get(common._T_NEXT_MARKER)
    if next_marker is None:
        self._options.pop('marker', None)
        return False
    self._options['marker'] = next_marker
    return True

Find interesting elements from XML.

This function tries to only look for specified elements without parsing the entire XML. The specified elements are better located near the beginning.

Args:
    result: response XML.
    elements: a set of interesting element tags.

Returns:
    A dict from element tag to element value.

def _find_elements(self, result, elements):
    element_mapping = {}
    result = StringIO.StringIO(result)
    for _, e in ET.iterparse(result, events=('end',)):
        if not elements:
            break
        if e.tag in elements:
            element_mapping[e.tag] = e.text
            elements.remove(e.tag)
    return element_mapping

Create a file.

The retry_params specified in the open call will override the default retry params for this particular file handle.

Args:
    filename: filename.

def create_file(self, filename):
    self.response.write('Creating file %s\n' % filename)

    write_retry_params = gcs.RetryParams(backoff_factor=1.1)
    gcs_file = gcs.open(filename,
                        'w',
                        content_type='text/plain',
                        options={'x-goog-meta-foo': 'foo',
                                 'x-goog-meta-bar': 'bar'},
                        retry_params=write_retry_params)
    gcs_file.write('abcde\n')
    gcs_file.write('f' * 1024 * 4 + '\n')
    gcs_file.close()
    self.tmp_filenames_to_clean_up.append(filename)

Create several files and paginate through them.

Production apps should set page_size to a practical value.

Args:
    bucket: bucket.

def list_bucket(self, bucket):
    self.response.write('Listbucket result:\n')

    page_size = 1
    stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)
    while True:
        count = 0
        for stat in stats:
            count += 1
            self.response.write(repr(stat))
            self.response.write('\n')

        if count != page_size or count == 0:
            break
        stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,
                               marker=stat.filename)

Create a GCS file with GCS client lib.

Args:
    filename: GCS filename.

Returns:
    The corresponding string blobkey for this GCS file.

def CreateFile(filename):
    with gcs.open(filename, 'w') as f:
        f.write('abcde\n')
    blobstore_filename = '/gs' + filename
    return blobstore.create_gs_key(blobstore_filename)

Get a fresh authentication token.

Args:
    scopes: A list of scopes.
    service_account_id: Internal-use only.

Raises:
    An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch.

def _make_token_async(scopes, service_account_id):
    rpc = app_identity.create_rpc()
    app_identity.make_get_access_token_call(rpc, scopes, service_account_id)
    token, expires_at = yield rpc
    raise ndb.Return((token, expires_at))

Helper to synthesize a synchronous method from an async method name.

Used by the @add_sync_methods class decorator below.

Args:
    name: The name of the synchronous method.

Returns:
    A method (with first argument 'self') that retrieves and calls self.<name>, passing its own arguments, expects it to return a Future, and then waits for and returns that Future's result.

def _make_sync_method(name):
    def sync_wrapper(self, *args, **kwds):
        method = getattr(self, name)
        future = method(*args, **kwds)
        return future.get_result()
    return sync_wrapper

Class decorator to add synchronous methods corresponding to async methods.

This modifies the class in place, adding additional methods to it. If a synchronous method of a given name already exists it is not replaced.

Args:
    cls: A class.

Returns:
    The same class, modified in place.

def add_sync_methods(cls):
    for name in cls.__dict__.keys():
        if name.endswith('_async'):
            sync_name = name[:-6]
            if not hasattr(cls, sync_name):
                setattr(cls, sync_name, _make_sync_method(name))
    return cls

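A minimal sketch of the decorator pair in action, using a stand-in Future class (hypothetical; the real methods return ndb futures). Given _make_sync_method and add_sync_methods as defined above:

class _FakeFuture(object):
    """Stand-in for an ndb Future; only get_result() is needed here."""
    def __init__(self, value):
        self._value = value

    def get_result(self):
        return self._value

@add_sync_methods
class Fetcher(object):
    def fetch_async(self, url):
        return _FakeFuture('payload from %s' % url)

# The decorator synthesized a synchronous Fetcher.fetch from fetch_async.
print(Fetcher().fetch('http://example.com'))  # payload from http://example.com
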
Constructor.

Args:
    scopes: A scope or a list of scopes.
    service_account_id: Internal use only.
    token_maker: An asynchronous function of the form (scopes, service_account_id) -> (token, expires).
    retry_params: An instance of api_utils.RetryParams. If None, the default for current thread will be used.

def __init__(self, scopes, service_account_id=None, token_maker=None,
             retry_params=None):
    if isinstance(scopes, basestring):
        scopes = [scopes]
    self.scopes = scopes
    self.service_account_id = service_account_id
    self.make_token_async = token_maker or _config.TOKEN_MAKER
    if not retry_params:
        retry_params = api_utils._get_default_retry_params()
    self.retry_params = retry_params
    self.user_agent = {'User-Agent': retry_params._user_agent}
    self.expiration_headroom = random.randint(60, 240)

Issue one HTTP request.

It performs async retries using tasklets.

Args:
    url: the url to fetch.
    method: the method in which to fetch.
    headers: the http headers.
    payload: the data to submit in the fetch.
    deadline: the deadline in which to make the call.
    callback: the call to make once completed.

Yields:
    The async fetch of the url.

def do_request_async(self, url, method='GET', headers=None, payload=None,
                     deadline=None, callback=None):
    retry_wrapper = api_utils._RetryWrapper(
        self.retry_params,
        retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS,
        should_retry=api_utils._should_retry)
    resp = yield retry_wrapper.run(
        self.urlfetch_async,
        url=url,
        method=method,
        headers=headers,
        payload=payload,
        deadline=deadline,
        callback=callback,
        follow_redirects=False)
    raise ndb.Return((resp.status_code, resp.headers, resp.content))

Get an authentication token.

The token is cached in memcache, keyed by the scopes argument. Uses a random token expiration headroom value generated in the constructor to eliminate a burst of GET_ACCESS_TOKEN API requests.

Args:
    refresh: If True, ignore a cached token; default False.

Yields:
    An authentication token. This token is guaranteed to be non-expired.

def get_token_async(self, refresh=False):
    key = '%s,%s' % (self.service_account_id, ','.join(self.scopes))
    ts = yield _AE_TokenStorage_.get_by_id_async(
        key, use_cache=True,
        use_memcache=self.retry_params.memcache_access_token,
        use_datastore=self.retry_params.save_access_token)
    if refresh or ts is None or ts.expires < (
            time.time() + self.expiration_headroom):
        token, expires_at = yield self.make_token_async(
            self.scopes, self.service_account_id)
        timeout = int(expires_at - time.time())
        ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at)
        if timeout > 0:
            yield ts.put_async(memcache_timeout=timeout,
                               use_datastore=self.retry_params.save_access_token,
                               force_writes=True,
                               use_cache=True,
                               use_memcache=self.retry_params.memcache_access_token)
    raise ndb.Return(ts.token)

Return the content length (in bytes) of the object as stored in GCS.

x-goog-stored-content-length should always be present except when called via the local dev_appserver. Therefore if it is not present we default to the standard content-length header.

Args:
    headers: a dict of headers from the http response.

Returns:
    the stored content length.

def get_stored_content_length(headers):
    length = headers.get('x-goog-stored-content-length')
    if length is None:
        length = headers.get('content-length')
    return length

Validate and process a Google Cloud Storage path prefix.

Args:
    path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'.

Raises:
    ValueError: if path is invalid.

Returns:
    a tuple of /bucket and prefix. prefix can be None.

def _process_path_prefix(path_prefix):
    _validate_path(path_prefix)
    if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
        raise ValueError('Path prefix should have format /bucket, /bucket/, '
                         'or /bucket/prefix but got %s.' % path_prefix)
    bucket_name_end = path_prefix.find('/', 1)
    bucket = path_prefix
    prefix = None
    if bucket_name_end != -1:
        bucket = path_prefix[:bucket_name_end]
        prefix = path_prefix[bucket_name_end + 1:] or None
    return bucket, prefix

Basic validation of Google Storage paths.

Args:
    path: a Google Storage path. It should have form '/bucket/filename' or '/bucket'.

Raises:
    ValueError: if path is invalid.
    TypeError: if path is not of type basestring.

def _validate_path(path):
    if not path:
        raise ValueError('Path is empty')
    if not isinstance(path, basestring):
        raise TypeError('Path should be a string but is %s (%s).' %
                        (path.__class__, path))

Validate Google Cloud Storage options.

Args:
    options: a str->basestring dict of options to pass to Google Cloud Storage.

Raises:
    ValueError: if option is not supported.
    TypeError: if option is not of type str or value of an option is not of type basestring.

def validate_options(options):
    if not options:
        return

    for k, v in options.iteritems():
        if not isinstance(k, str):
            raise TypeError('option %r should be a str.' % k)
        if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS):
            raise ValueError('option %s is not supported.' % k)
        if not isinstance(v, basestring):
            raise TypeError('value %r for option %s should be of type '
                            'basestring.' % (v, k))

Reverse of str_to_datetime.

This is used by GCS stub to generate GET bucket XML response.

Args:
    posix: A float of secs from unix epoch.

Returns:
    A datetime str.

def posix_to_dt_str(posix):
    dt = datetime.datetime.utcfromtimestamp(posix)
    dt_str = dt.strftime(_DT_FORMAT)
    return dt_str + '.000Z'

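A standalone illustration of the conversion; the value of the module constant _DT_FORMAT is an assumption here (an ISO-8601-style format is typical for GCS listings):

import datetime

_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'  # assumed value of the module constant

def posix_to_dt_str(posix):
    # Restates the helper above for a standalone run.
    return datetime.datetime.utcfromtimestamp(posix).strftime(_DT_FORMAT) + '.000Z'

print(posix_to_dt_str(0))  # 1970-01-01T00:00:00.000Z
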
Restore state as part of deserialization/unpickling.

Args:
    state: the tuple from a __getstate__ call.

def __setstate__(self, state):
    superstate, localstate = state
    super(_StorageApi, self).__setstate__(superstate)
    self.api_url = localstate['api_url']

COMPOSE multiple objects together.

Using the given list of files, calls the put object with the compose flag. This call merges all the files into the destination file.

Args:
    file_list: list of dicts with the file name.
    destination_file: Path to the destination file.
    content_type: Content type for the destination file.

def compose_object(self, file_list, destination_file, content_type):
    xml_setting_list = ['<ComposeRequest>']
    for meta_data in file_list:
        xml_setting_list.append('<Component>')
        for key, val in meta_data.iteritems():
            xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
        xml_setting_list.append('</Component>')
    xml_setting_list.append('</ComposeRequest>')
    xml = ''.join(xml_setting_list)

    if content_type is not None:
        headers = {'Content-Type': content_type}
    else:
        headers = None
    status, resp_headers, content = self.put_object(
        api_utils._quote_filename(destination_file) + '?compose',
        payload=xml,
        headers=headers)
    errors.check_status(status, [200], destination_file, resp_headers,
                        body=content)

Constructor.

Args:
    api: A StorageApi instance.
    path: Quoted/escaped path to the object, e.g. /mybucket/myfile
    buffer_size: buffer size. The ReadBuffer keeps one buffer. But there may be a pending future that contains a second buffer. This size must be less than max_request_size.
    max_request_size: Max bytes to request in one urlfetch.
    offset: Number of bytes to skip at the start of the file. If None, 0 is used.

def __init__(self, api, path, buffer_size=DEFAULT_BUFFER_SIZE,
             max_request_size=MAX_REQUEST_SIZE, offset=0):
    self._api = api
    self._path = path
    self.name = api_utils._unquote_filename(path)
    self.closed = False

    assert buffer_size <= max_request_size
    self._buffer_size = buffer_size
    self._max_request_size = max_request_size
    self._offset = offset

    self._buffer = _Buffer()
    self._etag = None

    get_future = self._get_segment(offset, self._buffer_size,
                                   check_response=False)

    status, headers, content = self._api.head_object(path)
    errors.check_status(status, [200], path, resp_headers=headers,
                        body=content)
    self._file_size = long(common.get_stored_content_length(headers))
    self._check_etag(headers.get('etag'))

    self._buffer_future = None

    if self._file_size != 0:
        content, check_response_closure = get_future.get_result()
        check_response_closure()
        self._buffer.reset(content)
        self._request_next_buffer()

Restore state as part of deserialization/unpickling.

Along with restoring the state, pre-fetch the next read buffer.

Args:
    state: the dictionary from a __getstate__ call.

def __setstate__(self, state):
    self._api = state['api']
    self._path = state['path']
    self.name = api_utils._unquote_filename(self._path)
    self._buffer_size = state['buffer_size']
    self._max_request_size = state['request_size']
    self._etag = state['etag']
    self._file_size = state['size']
    self._offset = state['offset']
    self._buffer = _Buffer()
    self.closed = state['closed']
    self._buffer_future = None
    if self._remaining() and not self.closed:
        self._request_next_buffer()

Read data from RAW file.

Args:
    size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file.

Returns:
    data read as str.

Raises:
    IOError: When this buffer is closed.

def read(self, size=-1):
    self._check_open()
    if not self._remaining():
        return ''

    data_list = []
    while True:
        remaining = self._buffer.remaining()
        if size >= 0 and size < remaining:
            data_list.append(self._buffer.read(size))
            self._offset += size
            break
        else:
            size -= remaining
            self._offset += remaining
            data_list.append(self._buffer.read())

            if self._buffer_future is None:
                if size < 0 or size >= self._remaining():
                    needs = self._remaining()
                else:
                    needs = size
                data_list.extend(self._get_segments(self._offset, needs))
                self._offset += needs
                break

            if self._buffer_future:
                self._buffer.reset(self._buffer_future.get_result())
                self._buffer_future = None

    if self._buffer_future is None:
        self._request_next_buffer()
    return ''.join(data_list)

Get segments of the file from Google Storage as a list.

A large request is broken into segments to avoid hitting urlfetch response size limit. Each segment is returned from a separate urlfetch.

Args:
    start: start offset to request. Inclusive. Must be within the range of the file.
    request_size: number of bytes to request.

Returns:
    A list of file segments in order.

def _get_segments(self, start, request_size):
    if not request_size:
        return []

    end = start + request_size
    futures = []

    while request_size > self._max_request_size:
        futures.append(self._get_segment(start, self._max_request_size))
        request_size -= self._max_request_size
        start += self._max_request_size
    if start < end:
        futures.append(self._get_segment(start, end - start))

    return [fut.get_result() for fut in futures]

Set the file's current offset.

Note if the new offset is out of bound, it is adjusted to either 0 or EOF.

Args:
    offset: seek offset as number.
    whence: seek mode. Supported modes are os.SEEK_SET (absolute seek), os.SEEK_CUR (seek relative to the current position), and os.SEEK_END (seek relative to the end, offset should be negative).

Raises:
    IOError: When this buffer is closed.
    ValueError: When whence is invalid.

def seek(self, offset, whence=os.SEEK_SET):
    self._check_open()

    self._buffer.reset()
    self._buffer_future = None

    if whence == os.SEEK_SET:
        self._offset = offset
    elif whence == os.SEEK_CUR:
        self._offset += offset
    elif whence == os.SEEK_END:
        self._offset = self._file_size + offset
    else:
        raise ValueError('Whence mode %s is invalid.' % str(whence))

    self._offset = min(self._offset, self._file_size)
    self._offset = max(self._offset, 0)
    if self._remaining():
        self._request_next_buffer()

Returns bytes from self._buffer and updates related offsets.

Args:
    size: number of bytes to read starting from current offset. Read the entire buffer if negative.

Returns:
    Requested bytes from buffer.

def read(self, size=-1):
    if size < 0:
        offset = len(self._buffer)
    else:
        offset = self._offset + size
    return self.read_to_offset(offset)

Returns bytes from self._buffer and updates related offsets.

Args:
    offset: read from current offset to this offset, exclusive.

Returns:
    Requested bytes from buffer.

def read_to_offset(self, offset):
    assert offset >= self._offset
    result = self._buffer[self._offset: offset]
    self._offset += len(result)
    return result

Search for newline char in buffer starting from current offset.

Args:
    size: number of bytes to search. -1 means all.

Returns:
    offset of newline char in buffer. -1 if it doesn't exist.

def find_newline(self, size=-1):
    if size < 0:
        return self._buffer.find('\n', self._offset)
    return self._buffer.find('\n', self._offset, self._offset + size)

Constructor.

Args:
    api: A StorageApi instance.
    path: Quoted/escaped path to the object, e.g. /mybucket/myfile
    content_type: Optional content-type; Default value is delegate to Google Cloud Storage.
    gcs_headers: additional gs headers as a str->str dict, e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.

Raises:
    IOError: When this location can not be found.

def __init__(self, api, path, content_type=None, gcs_headers=None):
    assert self._maxrequestsize > self._blocksize
    assert self._maxrequestsize % self._blocksize == 0
    assert self._maxrequestsize >= self._flushsize

    self._api = api
    self._path = path

    self.name = api_utils._unquote_filename(path)
    self.closed = False

    self._buffer = collections.deque()
    self._buffered = 0
    self._written = 0
    self._offset = 0

    headers = {'x-goog-resumable': 'start'}
    if content_type:
        headers['content-type'] = content_type
    if gcs_headers:
        headers.update(gcs_headers)
    status, resp_headers, content = self._api.post_object(path, headers=headers)
    errors.check_status(status, [201], path, headers, resp_headers,
                        body=content)
    loc = resp_headers.get('location')
    if not loc:
        raise IOError('No location header found in 201 response')
    parsed = urlparse.urlparse(loc)
    self._path_with_token = '%s?%s' % (self._path, parsed.query)

Restore state as part of deserialization/unpickling.

Args:
    state: the dictionary from a __getstate__ call.

def __setstate__(self, state):
    self._api = state['api']
    self._path_with_token = state['path_token']
    self._buffer = state['buffer']
    self._buffered = state['buffered']
    self._written = state['written']
    self._offset = state['offset']
    self.closed = state['closed']
    self._path = state['path']
    self.name = api_utils._unquote_filename(self._path)

Write some bytes.

Args:
    data: data to write. str.

Raises:
    TypeError: if data is not of type str.

def write(self, data):
    self._check_open()
    if not isinstance(data, str):
        raise TypeError('Expected str but got %s.' % type(data))
    if not data:
        return
    self._buffer.append(data)
    self._buffered += len(data)
    self._offset += len(data)
    if self._buffered >= self._flushsize:
        self._flush()

Send the block to the storage service.

This is a utility method that does not modify self.

Args:
    data: data to send in str.
    start_offset: start offset of the data in relation to the file.
    file_len: an int if this is the last data to append to the file. Otherwise '*'.

def _send_data(self, data, start_offset, file_len):
    headers = {}
    end_offset = start_offset + len(data) - 1

    if data:
        headers['content-range'] = ('bytes %d-%d/%s' %
                                    (start_offset, end_offset, file_len))
    else:
        headers['content-range'] = ('bytes */%s' % file_len)

    status, response_headers, content = self._api.put_object(
        self._path_with_token, payload=data, headers=headers)
    if file_len == '*':
        expected = 308
    else:
        expected = 200
    errors.check_status(status, [expected], self._path, headers,
                        response_headers, content,
                        {'upload_path': self._path_with_token})

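To make the content-range arithmetic concrete, a small standalone sketch (the offsets and payload are made up):

start_offset, data = 4096, b'x' * 256
end_offset = start_offset + len(data) - 1
print('bytes %d-%d/%s' % (start_offset, end_offset, '*'))   # bytes 4096-4351/*    (more data follows -> expect 308)
print('bytes %d-%d/%s' % (start_offset, end_offset, 4352))  # bytes 4096-4351/4352 (final chunk -> expect 200)
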
Close this buffer on file_length.

Finalize this upload immediately on file_length. Contents that are still in memory will not be uploaded.

This is a utility method that does not modify self.

Args:
    file_length: file length. Must match what has been uploaded. If None, it will be queried from GCS.

def _force_close(self, file_length=None):
    if file_length is None:
        file_length = self._get_offset_from_gcs() + 1
    self._send_data('', 0, file_length)

Wraps jsonschema.validate, returning the same object passed in.

Args:
    request: The deserialized-from-json request.
    schema: The jsonschema schema to validate against.

Raises:
    jsonschema.ValidationError

def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]:
    jsonschema_validate(request, schema)
    return request

Validates arguments and then calls the method.

Args:
    method: The method to call.
    *args, **kwargs: Arguments to the method.

Returns:
    The "result" part of the JSON-RPC response (the return value from the method).

Raises:
    TypeError: If arguments don't match function signature.

def call(method: Method, *args: Any, **kwargs: Any) -> Any:
    return validate_args(method, *args, **kwargs)(*args, **kwargs)

Call a Request, catching exceptions to ensure we always return a Response.

Args:
    request: The Request object.
    methods: The list of methods that can be called.
    debug: Include more information in error responses.

Returns:
    A Response object.

def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response:
    with handle_exceptions(request, debug) as handler:
        result = call(methods.items[request.method], *request.args, **request.kwargs)
        handler.response = SuccessResponse(result=result, id=request.id)
    return handler.response

Takes a request or list of Requests and calls them.

Args:
    requests: Request object, or a collection of them.
    methods: The list of methods that can be called.
    debug: Include more information in error responses.

def call_requests(
    requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool
) -> Response:
    if isinstance(requests, collections.Iterable):
        return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
    return safe_call(requests, methods, debug=debug)

Create a Request object from a dictionary (or list of them).

Args:
    requests: Request object, or a collection of them.
    context: If specified, will be the first positional argument in all requests.
    convert_camel_case: Will convert the method name/any named params to snake case.

Returns:
    A Request object, or a collection of them.

def create_requests(
    requests: Union[Dict, List],
    *,
    context: Any = NOCONTEXT,
    convert_camel_case: bool,
) -> Union[Request, Set[Request]]:
    if isinstance(requests, list):
        return {
            Request(context=context, convert_camel_case=convert_camel_case, **request)
            for request in requests
        }
    return Request(context=context, convert_camel_case=convert_camel_case, **requests)

A basic way to serve the methods.

Args:
    name: Server address.
    port: Server port.

def serve(name: str = "", port: int = 5000) -> None:
    logging.info(" * Listening on port %s", port)
    httpd = HTTPServer((name, port), RequestHandler)
    httpd.serve_forever()

Log a request or response.

Args:
    message: JSON-RPC request or response string.
    logger: The logger to log to.
    level: Log level.
    extra: More details to include in the log entry.
    trim: Abbreviate log messages.

def log_(
    message: str,
    logger: logging.Logger,
    level: int = logging.INFO,
    extra: Optional[Dict] = None,
    trim: bool = False,
) -> None:
    if extra is None:
        extra = {}
    # Clean up the message for logging
    if message:
        message = message.replace("\n", "").replace("  ", " ").replace("{ ", "{")
    if trim:
        message = _trim_message(message)
    # Log.
    logger.log(level, message, extra=extra)

Check if the request's arguments match a function's signature.

Raises TypeError exception if arguments cannot be passed to a function.

Args:
    func: The function to check.
    args: Positional arguments.
    kwargs: Keyword arguments.

Raises:
    TypeError: If the arguments cannot be passed to the function.

def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:
    signature(func).bind(*args, **kwargs)
    return func

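The heavy lifting is done by inspect.signature().bind(), which raises TypeError without actually calling the function. A standalone illustration (the add function is made up):

from inspect import signature

def add(a: int, b: int = 0) -> int:
    return a + b

signature(add).bind(1, 2)   # fine: arguments match the signature
try:
    signature(add).bind()   # no arguments given
except TypeError as exc:
    print(exc)              # missing a required argument: 'a'
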
Return a hash of all of the given files at the given root.

Adapted from stacker.hooks.aws_lambda; used according to its license:
https://github.com/cloudtools/stacker/blob/1.4.0/LICENSE

Args:
    files (list[str]): file names to include in the hash calculation, relative to ``root``.
    root (str): base directory to analyze files in.

Returns:
    str: A hash of the hashes of the given files.

def calculate_hash_of_files(files, root):
    file_hash = hashlib.md5()
    for fname in sorted(files):
        fileobj = os.path.join(root, fname)
        file_hash.update((fname + "\0").encode())
        with open(fileobj, "rb") as filedes:
            for chunk in iter(lambda: filedes.read(4096), ""):  # noqa pylint: disable=cell-var-from-loop
                if not chunk:
                    break
                file_hash.update(chunk)
            file_hash.update("\0".encode())
    return file_hash.hexdigest()

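A hypothetical usage sketch (assuming hashlib and os are imported as in the module): because file names are sorted and separated by NUL bytes, the digest is deterministic and changes whenever any listed file's name or content changes.

import os
import tempfile

root = tempfile.mkdtemp()
with open(os.path.join(root, "a.txt"), "wb") as handle:
    handle.write(b"hello")
print(calculate_hash_of_files(["a.txt"], root))  # stable md5 hex digest
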
Sets the agent availability to True.

Args:
    show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)

def set_available(self, show=None):
    show = self.state.show if show is None else show
    self.set_presence(PresenceState(available=True, show=show))

Change the presence broadcast by the client.

If the client is currently connected, the new presence is broadcast immediately.

Args:
    state (aioxmpp.PresenceState, optional): New presence state to broadcast (Default value = None)
    status (dict or str, optional): New status information to broadcast (Default value = None)
    priority (int, optional): New priority for the resource (Default value = None)

def set_presence(self, state=None, status=None, priority=None):
    state = state if state is not None else self.state
    status = status if status is not None else self.status
    priority = priority if priority is not None else self.priority
    self.presenceserver.set_presence(state, status, priority)

Returns a contact.

Args:
    jid (aioxmpp.JID): jid of the contact

Returns:
    dict: the requested contact

def get_contact(self, jid):
    try:
        return self.get_contacts()[jid.bare()]
    except KeyError:
        raise ContactNotFound
    except AttributeError:
        raise AttributeError("jid must be an aioxmpp.JID object")

Asks for subscription.

Args:
    peer_jid (str): the JID you ask for subscription

def subscribe(self, peer_jid):
    self.roster.subscribe(aioxmpp.JID.fromstr(peer_jid).bare())

Asks for unsubscription.

Args:
    peer_jid (str): the JID you ask for unsubscription

def unsubscribe(self, peer_jid):
    self.roster.unsubscribe(aioxmpp.JID.fromstr(peer_jid).bare())

Approve a subscription request from jid.

Args:
    peer_jid (str): the JID to approve

def approve(self, peer_jid):
    self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare())

Adds a new event to the trace store. The event may have a category.

Args:
    event (spade.message.Message): the event to be stored
    category (str, optional): a category to classify the event (Default value = None)

def append(self, event, category=None):
    date = datetime.datetime.now()
    self.store.insert(0, (date, event, category))
    if len(self.store) > self.size:
        del self.store[-1]

Returns all the events that have been received (excluding sent events), until a limit if defined.

Args:
    limit (int, optional): the max length of the events to return (Default value = None)

Returns:
    list: a list of received events

def received(self, limit=None):
    return list(itertools.islice(
        itertools.filterfalse(lambda x: x[1].sent, self.store), limit
    ))[::-1]

Returns the events that match the filters.

Args:
    limit (int, optional): the max length of the events to return (Default value = None)
    to (str, optional): only events that have been sent or received by 'to' (Default value = None)
    category (str, optional): only events belonging to the category (Default value = None)

Returns:
    list: a list of filtered events

def filter(self, limit=None, to=None, category=None):
    if category and not to:
        msg_slice = itertools.islice((x for x in self.store if x[2] == category), limit)
    elif to and not category:
        to = JID.fromstr(to)
        msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)
    elif to and category:
        to = JID.fromstr(to)
        msg_slice = itertools.islice(
            (x for x in self.store if _agent_in_msg(to, x[1]) and x[2] == category), limit
        )
    else:
        msg_slice = self.all(limit=limit)
        return msg_slice

    return list(msg_slice)[::-1]

Creates an agent.

Args:
    jid (str): The identifier of the agent in the form username@server
    password (str): The password to connect to the server
    verify_security (bool): Whether or not to verify the SSL certificates

def __init__(self, jid, password, verify_security=False):
    self.jid = aioxmpp.JID.fromstr(jid)
    self.password = password
    self.verify_security = verify_security

    self.behaviours = []
    self._values = {}

    self.conn_coro = None
    self.stream = None
    self.client = None
    self.message_dispatcher = None
    self.presence = None
    self.loop = None

    self.container = Container()
    self.container.register(self)
    self.loop = self.container.loop

    # Web service
    self.web = WebApp(agent=self)

    self.traces = TraceStore(size=1000)

    self._alive = Event()

Tells the container to start this agent.

It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.

Args:
    auto_register (bool): register the agent in the server (Default value = True)

def start(self, auto_register=True):
    return self.container.start_agent(agent=self, auto_register=auto_register)

Starts the agent from a coroutine.

This fires some actions:
    * if auto_register: register the agent in the server
    * runs the event loop
    * connects the agent to the server
    * runs the registered behaviours

Args:
    auto_register (bool, optional): register the agent in the server (Default value = True)

async def _async_start(self, auto_register=True):
    if auto_register:
        await self._async_register()

    self.client = aioxmpp.PresenceManagedClient(
        self.jid,
        aioxmpp.make_security_layer(self.password, no_verify=not self.verify_security),
        loop=self.loop,
        logger=logging.getLogger(self.jid.localpart),
    )

    # obtain an instance of the service
    self.message_dispatcher = self.client.summon(SimpleMessageDispatcher)

    # Presence service
    self.presence = PresenceManager(self)

    await self._async_connect()

    # register a message callback here
    self.message_dispatcher.register_callback(
        aioxmpp.MessageType.CHAT,
        None,
        self._message_received,
    )

    await self.setup()
    self._alive.set()
    for behaviour in self.behaviours:
        if not behaviour.is_running:
            behaviour.start()

Static method to build a gravatar URL with the agent's JID.

Args:
    jid (aioxmpp.JID): an XMPP identifier

Returns:
    str: a URL for the gravatar

def build_avatar_url(jid):
    digest = md5(str(jid).encode("utf-8")).hexdigest()
    return "http://www.gravatar.com/avatar/{md5}?d=monsterid".format(md5=digest)
