_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q275200
CLI.run
test
def run(self, args):
    """Parse raw CLI arguments, resolve the channel layer and start the client.

    Args:
        args (list of str): Raw command-line arguments fed to ``self.parser``.

    Raises:
        ValueError: If no Slack token was supplied (via ``--token`` or the
            DJANGOBOT_TOKEN setting mentioned in the error message).
    """
    args = self.parser.parse_args(args)
    if not args.token:
        raise ValueError('Supply the slack token through --token or setting DJANGOBOT_TOKEN')
    # Resolve the channel layer from a "module.path:attr.path" spec.
    # The CWD is prepended to sys.path so a project-local module resolves.
    sys.path.insert(0, ".")
    module_path, object_path = args.channel_layer.split(':', 1)
    channel_layer = importlib.import_module(module_path)
    for part in object_path.split('.'):
        channel_layer = getattr(channel_layer, part)
    # Boot up the client with the resolved layer and token.
    Client(
        channel_layer=channel_layer,
        token=args.token,
    ).run()
python
{ "resource": "" }
q275201
dict_diff
test
def dict_diff(prv, nxt):
    """Return a dict of keys that differ between two dict-like configs.

    Args:
        prv (dict): First ("previous") mapping.
        nxt (dict): Second ("next") mapping.

    Returns:
        dict: Maps each differing key to a ``(prv_value, nxt_value)`` tuple.
        A key missing from one side appears with ``None`` for that side.
    """
    # Set union of the keys works on both Python 2 and 3; the original
    # ``prv.keys() + nxt.keys()`` relied on Python 2 returning lists and
    # raises TypeError on Python 3 dict views.
    keys = set(prv) | set(nxt)
    result = {}
    for k in keys:
        if prv.get(k) != nxt.get(k):
            result[k] = (prv.get(k), nxt.get(k))
    return result
python
{ "resource": "" }
q275202
colorize
test
def colorize(msg, color):
    """Wrap *msg* in the escape codes for *color*; no-op when colors are off."""
    if not DONT_COLORIZE:
        msg = "{}{}{}".format(COLORS[color], msg, COLORS["endc"])
    return msg
python
{ "resource": "" }
q275203
CallbackModule.v2_playbook_on_task_start
test
def v2_playbook_on_task_start(self, task, **kwargs):
    """Record the task that just started so its header can be printed lazily."""
    name = task.get_name()
    self.printed_last_task = False
    self.last_task_name = name
python
{ "resource": "" }
q275204
CallbackModule.v2_runner_on_ok
test
def v2_runner_on_ok(self, result, **kwargs):
    """Run when a task finishes correctly.

    Prints the task header plus a per-host status line (and one line per
    loop item when ``results`` is present); tasks that don't qualify for
    display collapse to a progress dot.

    Args:
        result: Ansible TaskResult for the finished task.
    """
    failed = "failed" in result._result
    unreachable = "unreachable" in result._result
    # Only show output for explicitly tagged tasks, failures/unreachable
    # hosts, or verbose runs; everything else prints ".".
    if (
        "print_action" in result._task.tags
        or failed
        or unreachable
        or self._display.verbosity > 1
    ):
        self._print_task()
        self.last_skipped = False
        # First non-empty of msg/reason/message is displayed.
        # NOTE(review): ``unicode`` is Python 2-only; a Python 3 port would
        # need ``str`` or ansible's ``to_text`` here.
        msg = unicode(result._result.get("msg", "")) or unicode(
            result._result.get("reason", "")
        ) or unicode(
            result._result.get("message", "")
        )
        # Merge exception text and module stderr into one block.
        stderr = [
            result._result.get("exception", None),
            result._result.get("module_stderr", None),
        ]
        stderr = "\n".join([e for e in stderr if e]).strip()
        self._print_host_or_item(
            result._host,
            result._result.get("changed", False),
            msg,
            result._result.get("diff", None),
            is_host=True,
            error=failed or unreachable,
            stdout=result._result.get("module_stdout", None),
            stderr=stderr.strip(),
        )
        # Loop tasks carry one sub-result per item; print each.
        if "results" in result._result:
            for r in result._result["results"]:
                failed = "failed" in r
                stderr = [r.get("exception", None), r.get("module_stderr", None)]
                stderr = "\n".join([e for e in stderr if e]).strip()
                self._print_host_or_item(
                    r["item"],
                    r.get("changed", False),
                    unicode(r.get("msg", "")),
                    r.get("diff", None),
                    is_host=False,
                    error=failed,
                    stdout=r.get("module_stdout", None),
                    stderr=stderr.strip(),
                )
    else:
        self.last_skipped = True
        print(".", end="")
python
{ "resource": "" }
q275205
CallbackModule.v2_playbook_on_stats
test
def v2_playbook_on_stats(self, stats):
    """Display info about playbook statistics.

    Prints one colorized summary line per host when the playbook ends.

    Args:
        stats: Ansible aggregate stats object exposing ``processed`` and
            ``summarize(host)``.
    """
    print()
    self.printed_last_task = False
    self._print_task("STATS")
    # Stable, alphabetical host order.
    hosts = sorted(stats.processed.keys())
    for host in hosts:
        s = stats.summarize(host)
        # Color precedence: failure/unreachable wins, then changed, then ok.
        if s["failures"] or s["unreachable"]:
            color = "failed"
        elif s["changed"]:
            color = "changed"
        else:
            color = "ok"
        msg = "{} : ok={}\tchanged={}\tfailed={}\tunreachable={}".format(
            host, s["ok"], s["changed"], s["failures"], s["unreachable"]
        )
        print(colorize(msg, color))
python
{ "resource": "" }
q275206
CallbackModule.v2_runner_on_skipped
test
def v2_runner_on_skipped(self, result, **kwargs):
    """Run when a task is skipped.

    Only prints anything at verbosity > 1: a host line with a "skipped"
    marker, padded with dashes to a fixed width; long skip reasons are
    printed indented on their own lines.

    Args:
        result: Ansible TaskResult for the skipped task.
    """
    if self._display.verbosity > 1:
        self._print_task()
        self.last_skipped = False
        line_length = 120
        # Column alignment: pad host name out to a fixed-width field.
        spaces = " " * (31 - len(result._host.name) - 4)
        line = " * {}{}- {}".format(
            colorize(result._host.name, "not_so_bold"),
            spaces,
            colorize("skipped", "skipped"),
        )
        # Either of the two keys may carry the reason depending on version.
        reason = result._result.get("skipped_reason", "") or result._result.get(
            "skip_reason", ""
        )
        if len(reason) < 50:
            # Short reason fits inline on the status line.
            line += " -- {}".format(reason)
            print("{} {}---------".format(line, "-" * (line_length - len(line))))
        else:
            # Long reason goes on its own indented lines below the status.
            print("{} {}".format(line, "-" * (line_length - len(line))))
            print(self._indent_text(reason, 8))
            # NOTE(review): the reason is printed twice (indented and plain);
            # looks like leftover debugging output — confirm intent.
            print(reason)
python
{ "resource": "" }
q275207
prefix_to_addrmask
test
def prefix_to_addrmask(value, sep=" "):
    """Convert a CIDR prefix into an "address<sep>netmask" string.

    Argument ``sep`` chooses the separator between address and netmask;
    a single space by default.

    Examples:
        >>> "{{ '192.168.0.1/24|prefix_to_addrmask }}" -> "192.168.0.1 255.255.255.0"
        >>> "{{ '192.168.0.1/24|prefix_to_addrmask('/') }}" -> "192.168.0.1/255.255.255.0"
    """
    network = netaddr.IPNetwork(value)
    pieces = (network.ip, sep, network.netmask)
    return "{}{}{}".format(*pieces)
python
{ "resource": "" }
q275208
check_empty
test
def check_empty(default=""):
    """Decorator factory: short-circuit falsy filter input to *default*.

    When the wrapped Jinja filter receives a value that evaluates to false,
    *default* is returned instead of calling the filter.

    Example usage:

        @check_empty
        def my_jinja_filter(value, arg1):
    """
    def decorate(func):
        @wraps(func)
        def guarded(value, *args, **kwargs):
            if value:
                return func(value, *args, **kwargs)
            return guarded_default
        guarded_default = default
        return guarded
    return decorate
python
{ "resource": "" }
q275209
Root.add_model
test
def add_model(self, model, force=False):
    """Add a model.

    The model will be assigned to a class attribute with the YANG name of
    the model.

    Args:
        model (PybindBase or str): Model class, model instance, or model
            name to add.
        force (bool): If not set, verify the model is in SUPPORTED_MODELS.

    Raises:
        ValueError: If the model is not in SUPPORTED_MODELS and ``force``
            is not set.

    Examples:
        >>> import napalm_yang
        >>> config = napalm_yang.base.Root()
        >>> config.add_model(napalm_yang.models.openconfig_interfaces)
        >>> config.interfaces
        <pyangbind.lib.yangtypes.YANGBaseClass object at 0x10bef6680>
    """
    # A string is treated as a model name and delegated to _load_model.
    if isinstance(model, str):
        self._load_model(model)
        return
    # Accept either a model class or an instance; if calling it fails we
    # assume an instance was passed in already.
    try:
        model = model()
    except Exception:
        pass
    if model._yang_name not in [a[0] for a in SUPPORTED_MODELS] and not force:
        raise ValueError(
            "Only models in SUPPORTED_MODELS can be added without `force=True`"
        )
    # Register each top-level element and expose it as an attribute.
    for k, v in model:
        self._elements[k] = v
        setattr(self, k, v)
python
{ "resource": "" }
q275210
Root.get
test
def get(self, filter=False):
    """Return a dictionary with the values of the model.

    Note that the values of the leafs are YANG classes.

    Args:
        filter (bool): If set to ``True``, show only values that have
            been set.

    Returns:
        dict: A dictionary with the values of the model, keyed by element
        name; elements whose ``get`` returns something falsy are omitted.

    Example:
        >>> pretty_print(config.get(filter=True))
    """
    pairs = (
        (name, element.get(filter=filter))
        for name, element in self.elements().items()
    )
    return {name: value for name, value in pairs if value}
python
{ "resource": "" }
q275211
Root.load_dict
test
def load_dict(self, data, overwrite=False, auto_load_model=True):
    """Load a dictionary into the model.

    Args:
        data (dict): Dictionary to load, keyed by model name.
        overwrite (bool): Whether data present in the model should be
            overwritten by the data in the dict or not.
        auto_load_model (bool): If set to true, models will be loaded as
            they are needed.

    Raises:
        AttributeError: If a key names a model that is not loaded and
            ``auto_load_model`` is false.

    Examples:
        >>> vlans_dict = {
        >>>     "vlans": {"vlan": {100: {
        >>>         "config": {"vlan_id": 100, "name": "production"}},
        >>>         200: {"config": {"vlan_id": 200, "name": "dev"}}}}}
        >>> config.load_dict(vlans_dict)
        >>> print(config.vlans.vlan.keys())
        ... [200, 100]
    """
    # NOTE(review): ``overwrite`` is not referenced in this body — confirm
    # whether _load_dict is supposed to receive it.
    for k, v in data.items():
        if k not in self._elements.keys() and not auto_load_model:
            raise AttributeError("Model {} is not loaded".format(k))
        elif k not in self._elements.keys() and auto_load_model:
            # Lazily pull in the model the first time its key appears.
            self._load_model(k)
        attr = getattr(self, k)
        _load_dict(attr, v)
python
{ "resource": "" }
q275212
Root.to_dict
test
def to_dict(self, filter=True):
    """Returns a dictionary with the values of the model.

    Note that, unlike ``get``, the values of the leafs are evaluated to
    plain python types.

    Args:
        filter (bool): If set to ``True``, show only values that have
            been set.

    Returns:
        dict: A dictionary with the values of the model; elements whose
        conversion is falsy are omitted.

    Example:
        >>> pretty_print(config.to_dict(filter=True))
    """
    result = {}
    for k, v in self:
        r = _to_dict(v, filter)
        if r:
            result[k] = r
    return result
python
{ "resource": "" }
q275213
Root.parse_config
test
def parse_config(self, device=None, profile=None, native=None, attrs=None):
    """Parse native configuration and load it into the corresponding models.

    Only models that have been added to the root object will be parsed. If
    ``native`` is passed to the method that's what we will parse; otherwise
    we will use the ``device`` to retrieve it.

    Args:
        device (NetworkDriver): Device to load the configuration from.
        profile (list): Profiles that the device supports. If no
            ``profile`` is passed it will be read from ``device``.
        native (list of strings): Native configuration to parse.
        attrs (iterable): Subset of elements to parse; defaults to every
            loaded element.

    Examples:
        >>> # Load from device
        >>> running_config = napalm_yang.base.Root()
        >>> running_config.add_model(napalm_yang.models.openconfig_interfaces)
        >>> running_config.parse_config(device=d)

        >>> # Load from file
        >>> with open("junos.config", "r") as f:
        >>>     config = f.read()
        >>>
        >>> running_config = napalm_yang.base.Root()
        >>> running_config.add_model(napalm_yang.models.openconfig_interfaces)
        >>> running_config.parse_config(native=[config], profile="junos")
    """
    if attrs is None:
        attrs = self.elements().values()
    # One Parser per model; is_config=True selects the config parsers.
    for v in attrs:
        parser = Parser(
            v, device=device, profile=profile, native=native, is_config=True
        )
        parser.parse()
python
{ "resource": "" }
q275214
Root.parse_state
test
def parse_state(self, device=None, profile=None, native=None, attrs=None):
    """Parse native state and load it into the corresponding models.

    Only models that have been added to the root object will be parsed. If
    ``native`` is passed to the method that's what we will parse; otherwise
    we will use the ``device`` to retrieve it.

    Args:
        device (NetworkDriver): Device to load the state from.
        profile (list): Profiles that the device supports. If no
            ``profile`` is passed it will be read from ``device``.
        native (list of strings): Native output to parse.
        attrs (iterable): Subset of elements to parse; defaults to every
            loaded element.

    Examples:
        >>> # Load from device
        >>> state = napalm_yang.base.Root()
        >>> state.add_model(napalm_yang.models.openconfig_interfaces)
        >>> state.parse_state(device=d)

        >>> # Load from file
        >>> with open("junos.state", "r") as f:
        >>>     state_data = f.read()
        >>>
        >>> state = napalm_yang.base.Root()
        >>> state.add_model(napalm_yang.models.openconfig_interfaces)
        >>> state.parse_state(native=[state_data], profile="junos")
    """
    if attrs is None:
        attrs = self.elements().values()
    # One Parser per model; is_config=False selects the state parsers.
    for v in attrs:
        parser = Parser(
            v, device=device, profile=profile, native=native, is_config=False
        )
        parser.parse()
python
{ "resource": "" }
q275215
Root.translate_config
test
def translate_config(self, profile, merge=None, replace=None):
    """Translate the object to native configuration.

    In this context, merge and replace means the following:

    * **Merge** - Elements that exist in both ``self`` and ``merge`` will
      use by default the values in ``merge`` unless ``self`` specifies a
      new one. Elements that exist only in ``self`` will be translated as
      they are and elements present only in ``merge`` will be removed.
    * **Replace** - All the elements in ``replace`` will either be removed
      or replaced by elements in ``self``.

    You can specify one of ``merge``, ``replace`` or none of them. If none
    of them are set we will just translate configuration.

    Args:
        profile (list): Which profiles to use.
        merge (Root): Object we want to merge with.
        replace (Root): Object we want to replace.

    Returns:
        str: The native configuration, one translated model per line block.
    """
    result = []
    for k, v in self:
        # Pair each of our elements with its counterpart (if any) on the
        # merge/replace objects.
        other_merge = getattr(merge, k) if merge else None
        other_replace = getattr(replace, k) if replace else None
        translator = Translator(
            v, profile, merge=other_merge, replace=other_replace
        )
        result.append(translator.translate())
    return "\n".join(result)
python
{ "resource": "" }
q275216
load_filters
test
def load_filters():
    """Collect and return every Jinja filter exposed by JINJA_FILTERS modules."""
    contributions = [m.filters() for m in JINJA_FILTERS if hasattr(m, "filters")]
    merged = {}
    for mapping in contributions:
        merged.update(mapping)
    return merged
python
{ "resource": "" }
q275217
find_yang_file
test
def find_yang_file(profile, filename, path):
    """Find the necessary mapping file for the given test case.

    Args:
        profile (str): Device profile the mapping belongs to.
        filename (str): File to find.
        path (str): Where to find it, relative to where the module is
            installed.

    Returns:
        str: Absolute path of the located file.

    Raises:
        IOError: If the file does not exist (also logged as an error).
    """
    base_dir = os.path.dirname(__file__)
    candidate = os.path.join(base_dir, "mappings", profile, path, filename)
    if not os.path.exists(candidate):
        msg = "Couldn't find parsing file: {}".format(candidate)
        logger.error(msg)
        raise IOError(msg)
    return candidate
python
{ "resource": "" }
q275218
model_to_dict
test
def model_to_dict(model, mode="", show_defaults=False):
    """Given a model, return a representation of the model in a dict.

    This is mostly useful to have a quick visual representation of the
    model.

    Args:
        model (PybindBase): Model to transform.
        mode (string): Whether to print config, state or all elements
            ("" for all).
        show_defaults (bool): When true, leaf values are the YANG defaults
            instead of the type names, and keys carry no [rw]/[ro] tag.

    Returns:
        dict: A dictionary representing the model.

    Examples:
        >>> config = napalm_yang.base.Root()
        >>> config.add_model(napalm_yang.models.openconfig_interfaces())
        >>> pretty_print(napalm_yang.utils.model_to_dict(config))
        >>> {
        >>>     "openconfig-interfaces:interfaces [rw]": {
        >>>         "interface [rw]": {
        >>>             "config [rw]": {
        >>>                 "description [rw]": "string",
        >>>                 ... (trimmed for clarity)
    """
    def is_mode(obj, mode):
        # Decide whether this node belongs to the requested view.
        if mode == "":
            return True
        elif mode == "config":
            return obj._yang_name == "config" or obj._is_config
        elif mode == "state":
            return obj._yang_name == "state" or not obj._is_config
        else:
            raise ValueError(
                "mode can only be config, state or ''. Passed: {}".format(mode)
            )

    def get_key(key, model, parent_defining_module, show_defaults):
        if not show_defaults:
            # No need to display rw/ro when showing the defaults.
            key = "{} {}".format(key, "[rw]" if model._is_config else "[ro]")
        # Prefix the module name when the child comes from another module.
        if parent_defining_module != model._defining_module:
            key = "{}:{}".format(model._defining_module, key)
        return key

    if model._yang_type in ("container", "list"):
        # Lists recurse into their contained class rather than the list
        # object itself.
        cls = model if model._yang_type in ("container",) else model._contained_class()
        result = {}
        for k, v in cls:
            r = model_to_dict(v, mode=mode, show_defaults=show_defaults)
            if r:
                result[get_key(k, v, model._defining_module, show_defaults)] = r
        return result
    else:
        if show_defaults:
            if model._default is False:
                if model._yang_type != "boolean":
                    # Unless the datatype is bool, when the _default attribute
                    # is False, it means there is not default value defined in
                    # the YANG model.
                    return None
            return model._default
        return model._yang_type if is_mode(model, mode) else None
python
{ "resource": "" }
q275219
diff
test
def diff(f, s):
    """Given two models, return the difference between them.

    Args:
        f (Pybindbase): First element.
        s (Pybindbase): Second element.

    Returns:
        dict: A dictionary highlighting the differences. Leaf differences
        appear as ``{"first": ..., "second": ...}`` string pairs.

    Examples:
        >>> diff = napalm_yang.utils.diff(candidate, running)
        >>> pretty_print(diff)
        >>> {
        >>>     "interfaces": {
        >>>         "interface": {
        >>>             "both": {
        >>>                 "Port-Channel1": {
        >>>                     "config": {
        >>>                         "mtu": {"first": "0", "second": "9000"}
        >>>                     }
        >>>                 }
        >>>             },
        >>>             "first_only": ["Loopback0"],
        >>>             "second_only": ["Loopback1"]
        >>>         }
        >>>     }
        >>> }
    """
    # Containers (and the Root object) recurse element by element; lists
    # get the both/first_only/second_only treatment.
    if isinstance(f, base.Root) or f._yang_type in ("container", None):
        result = _diff_root(f, s)
    elif f._yang_type in ("list",):
        result = _diff_list(f, s)
    else:
        # Leaves compare by their string representation.
        result = {}
        first = "{}".format(f)
        second = "{}".format(s)
        if first != second:
            result = {"first": first, "second": second}
    return result
python
{ "resource": "" }
q275220
Client.http_post
test
def http_post(self, url, data=None):
    """POST to URL and get result as a response object.

    :param url: URL to POST. Must use the https scheme.
    :type url: str
    :param data: Data to send in the form body.
    :type data: str
    :rtype: requests.Response
    :raises ValueError: If *url* is not an HTTPS URL.
    """
    if url.startswith('https://'):
        # Certificate verification stays explicitly enabled.
        return requests.post(url, data, verify=True)
    raise ValueError('Protocol must be HTTPS, invalid URL: %s' % url)
python
{ "resource": "" }
q275221
Client.get_authorization_code_uri
test
def get_authorization_code_uri(self, **params):
    """Construct a full URL that can be used to obtain an authorization
    code from the provider authorization_uri. Use this URI in a client
    frame to cause the provider to generate an authorization code.

    :rtype: str
    """
    if 'response_type' not in params:
        params['response_type'] = self.default_response_type
    # Client identity always comes from this client, overriding any
    # caller-supplied values.
    params['client_id'] = self.client_id
    params['redirect_uri'] = self.redirect_uri
    return utils.build_url(self.authorization_uri, params)
python
{ "resource": "" }
q275222
Client.get_token
test
def get_token(self, code, **params):
    """Get an access token from the provider token URI.

    :param code: Authorization code.
    :type code: str
    :return: Dict containing access token, refresh token, etc.
    :rtype: dict
    """
    params['code'] = code
    if 'grant_type' not in params:
        params['grant_type'] = self.default_grant_type
    # Client credentials always come from this client.
    params['client_id'] = self.client_id
    params['client_secret'] = self.client_secret
    params['redirect_uri'] = self.redirect_uri
    response = self.http_post(self.token_uri, params)
    # Older requests versions expose .json as a property rather than a
    # callable; calling it there raises TypeError.
    try:
        return response.json()
    except TypeError:
        return response.json
python
{ "resource": "" }
q275223
url_query_params
test
def url_query_params(url):
    """Return query parameters as a dict from the specified URL.

    Blank values are preserved (keep_blank_values is enabled).

    :param url: URL.
    :type url: str
    :rtype: dict
    """
    query = urlparse.urlparse(url).query
    pairs = urlparse.parse_qsl(query, True)
    return dict(pairs)
python
{ "resource": "" }
q275224
url_dequery
test
def url_dequery(url):
    """Return a URL with the query component removed.

    :param url: URL to dequery.
    :type url: str
    :rtype: str
    """
    parts = urlparse.urlparse(url)
    # Rebuild the URL with every component intact except the query.
    stripped = (parts.scheme, parts.netloc, parts.path, parts.params, '', parts.fragment)
    return urlparse.urlunparse(stripped)
python
{ "resource": "" }
q275225
build_url
test
def build_url(base, additional_params=None):
    """Construct a URL from *base*, merging its query string with extras.

    :param base: Base URL, possibly already carrying query parameters.
    :type base: str
    :param additional_params: Additional query parameters to include. A
        value of ``None`` removes that key from the final query entirely.
    :type additional_params: dict
    :rtype: str
    """
    url = urlparse.urlparse(base)
    query_params = {}
    query_params.update(urlparse.parse_qsl(url.query, True))
    if additional_params is not None:
        query_params.update(additional_params)
        # Drop keys explicitly marked for removal. BUG FIX: this loop used
        # to run outside the None guard (crashing with AttributeError when
        # additional_params was None) and used the Python 2-only
        # dict.iteritems().
        for k, v in additional_params.items():
            if v is None:
                query_params.pop(k)
    return urlparse.urlunparse((url.scheme,
                                url.netloc,
                                url.path,
                                url.params,
                                urllib.urlencode(query_params),
                                url.fragment))
python
{ "resource": "" }
q275226
Provider._handle_exception
test
def _handle_exception(self, exc): """Handle an internal exception that was caught and suppressed. :param exc: Exception to process. :type exc: Exception """ logger = logging.getLogger(__name__) logger.exception(exc)
python
{ "resource": "" }
q275227
Provider._make_response
test
def _make_response(self, body='', headers=None, status_code=200):
    """Return a response object from the given parameters.

    :param body: Buffer/string containing the response body.
    :type body: str
    :param headers: Dict of headers to include in the requests.
    :type headers: dict
    :param status_code: HTTP status code.
    :type status_code: int
    :rtype: requests.Response
    """
    response = Response()
    response.status_code = status_code
    # The body is exposed through the raw file-like interface.
    response.raw = StringIO(body)
    if headers is not None:
        response.headers.update(headers)
    return response
python
{ "resource": "" }
q275228
Provider._make_redirect_error_response
test
def _make_redirect_error_response(self, redirect_uri, err):
    """Return a HTTP 302 redirect response object containing the error.

    :param redirect_uri: Client redirect URI.
    :type redirect_uri: str
    :param err: OAuth error message.
    :type err: str
    :rtype: requests.Response
    """
    params = {'error': err}
    # None values instruct build_url to strip these keys from the final
    # redirect query string.
    for scrub in ('response_type', 'client_id', 'redirect_uri'):
        params[scrub] = None
    location = utils.build_url(redirect_uri, params)
    return self._make_response(headers={'Location': location},
                               status_code=302)
python
{ "resource": "" }
q275229
Provider._make_json_response
test
def _make_json_response(self, data, headers=None, status_code=200): """Return a response object from the given JSON data. :param data: Data to JSON-encode. :type data: mixed :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ response_headers = {} if headers is not None: response_headers.update(headers) response_headers['Content-Type'] = 'application/json;charset=UTF-8' response_headers['Cache-Control'] = 'no-store' response_headers['Pragma'] = 'no-cache' return self._make_response(json.dumps(data), response_headers, status_code)
python
{ "resource": "" }
q275230
AuthorizationProvider.get_authorization_code
test
def get_authorization_code(self, response_type, client_id, redirect_uri, **params): """Generate authorization code HTTP response. :param response_type: Desired response type. Must be exactly "code". :type response_type: str :param client_id: Client ID. :type client_id: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :rtype: requests.Response """ # Ensure proper response_type if response_type != 'code': err = 'unsupported_response_type' return self._make_redirect_error_response(redirect_uri, err) # Check redirect URI is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) if not is_valid_redirect_uri: return self._invalid_redirect_uri_response() # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_access = self.validate_access() scope = params.get('scope', '') is_valid_scope = self.validate_scope(client_id, scope) # Return proper error responses on invalid conditions if not is_valid_client_id: err = 'unauthorized_client' return self._make_redirect_error_response(redirect_uri, err) if not is_valid_access: err = 'access_denied' return self._make_redirect_error_response(redirect_uri, err) if not is_valid_scope: err = 'invalid_scope' return self._make_redirect_error_response(redirect_uri, err) # Generate authorization code code = self.generate_authorization_code() # Save information to be used to validate later requests self.persist_authorization_code(client_id=client_id, code=code, scope=scope) # Return redirection response params.update({ 'code': code, 'response_type': None, 'client_id': None, 'redirect_uri': None }) redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={'Location': redirect}, status_code=302)
python
{ "resource": "" }
q275231
AuthorizationProvider.refresh_token
test
def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params): """Generate access token HTTP response from a refresh token. :param grant_type: Desired grant type. Must be "refresh_token". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param refresh_token: Refresh token. :type refresh_token: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != 'refresh_token': return self._make_json_error_response('unsupported_grant_type') # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) scope = params.get('scope', '') is_valid_scope = self.validate_scope(client_id, scope) data = self.from_refresh_token(client_id, refresh_token, scope) is_valid_refresh_token = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response('invalid_client') if not is_valid_scope: return self._make_json_error_response('invalid_scope') if not is_valid_refresh_token: return self._make_json_error_response('invalid_grant') # Discard original refresh token self.discard_refresh_token(client_id, refresh_token) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information(client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data) # Return json response return self._make_json_response({ 'access_token': access_token, 'token_type': token_type, 'expires_in': expires_in, 'refresh_token': refresh_token })
python
{ "resource": "" }
q275232
AuthorizationProvider.get_token
test
def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params):
    """Generate access token HTTP response.

    :param grant_type: Desired grant type. Must be "authorization_code".
    :type grant_type: str
    :param client_id: Client ID.
    :type client_id: str
    :param client_secret: Client secret.
    :type client_secret: str
    :param redirect_uri: Client redirect URI.
    :type redirect_uri: str
    :param code: Authorization code.
    :type code: str
    :rtype: requests.Response
    """
    # Ensure proper grant_type
    if grant_type != 'authorization_code':
        return self._make_json_error_response('unsupported_grant_type')
    # Check conditions
    is_valid_client_id = self.validate_client_id(client_id)
    is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
    is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
    scope = params.get('scope', '')
    is_valid_scope = self.validate_scope(client_id, scope)
    # from_authorization_code returns None when the code is unknown/used.
    data = self.from_authorization_code(client_id, code, scope)
    is_valid_grant = data is not None
    # Return proper error responses on invalid conditions
    if not (is_valid_client_id and is_valid_client_secret):
        return self._make_json_error_response('invalid_client')
    if not is_valid_grant or not is_valid_redirect_uri:
        return self._make_json_error_response('invalid_grant')
    if not is_valid_scope:
        return self._make_json_error_response('invalid_scope')
    # Discard original authorization code (single use).
    self.discard_authorization_code(client_id, code)
    # Generate access tokens once all conditions have been met
    access_token = self.generate_access_token()
    token_type = self.token_type
    expires_in = self.token_expires_in
    refresh_token = self.generate_refresh_token()
    # Save information to be used to validate later requests
    self.persist_token_information(client_id=client_id,
                                   scope=scope,
                                   access_token=access_token,
                                   token_type=token_type,
                                   expires_in=expires_in,
                                   refresh_token=refresh_token,
                                   data=data)
    # Return json response
    return self._make_json_response({
        'access_token': access_token,
        'token_type': token_type,
        'expires_in': expires_in,
        'refresh_token': refresh_token
    })
python
{ "resource": "" }
q275233
AuthorizationProvider.get_authorization_code_from_uri
test
def get_authorization_code_from_uri(self, uri):
    """Get authorization code response from a URI. This method will
    ignore the domain and path of the request, instead automatically
    parsing the query string parameters.

    :param uri: URI to parse for authorization information.
    :type uri: str
    :rtype: requests.Response
    """
    params = utils.url_query_params(uri)
    try:
        if 'response_type' not in params:
            raise TypeError('Missing parameter response_type in URL query')
        if 'client_id' not in params:
            raise TypeError('Missing parameter client_id in URL query')
        if 'redirect_uri' not in params:
            raise TypeError('Missing parameter redirect_uri in URL query')
        return self.get_authorization_code(**params)
    except TypeError as exc:
        self._handle_exception(exc)
        # Catch missing parameters in request
        err = 'invalid_request'
        if 'redirect_uri' in params:
            u = params['redirect_uri']
            return self._make_redirect_error_response(u, err)
        else:
            return self._invalid_redirect_uri_response()
    except StandardError as exc:
        # NOTE(review): StandardError is Python 2-only; under Python 3
        # this clause would need to catch Exception instead.
        self._handle_exception(exc)
        # Catch all other server errors. redirect_uri is guaranteed present
        # here because a TypeError would have been raised above otherwise.
        err = 'server_error'
        u = params['redirect_uri']
        return self._make_redirect_error_response(u, err)
python
{ "resource": "" }
q275234
AuthorizationProvider.get_token_from_post_data
test
def get_token_from_post_data(self, data):
    """Get a token response from POST data.

    :param data: POST data containing authorization information.
    :type data: dict
    :rtype: requests.Response
    """
    try:
        # Verify OAuth 2.0 Parameters common to both flows.
        for x in ['grant_type', 'client_id', 'client_secret']:
            if not data.get(x):
                raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
        # Handle get token from refresh_token
        if 'refresh_token' in data:
            return self.refresh_token(**data)
        # Handle get token from authorization code; these params are only
        # required for the authorization-code flow.
        for x in ['redirect_uri', 'code']:
            if not data.get(x):
                raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
        return self.get_token(**data)
    except TypeError as exc:
        self._handle_exception(exc)
        # Catch missing parameters in request
        return self._make_json_error_response('invalid_request')
    except StandardError as exc:
        # NOTE(review): StandardError is Python 2-only; under Python 3
        # this clause would need to catch Exception instead.
        self._handle_exception(exc)
        # Catch all other server errors
        return self._make_json_error_response('server_error')
python
{ "resource": "" }
q275235
ResourceProvider.get_authorization
test
def get_authorization(self):
    """Build an authorization object describing the request's auth status.

    Reads the Authorization header; on a well-formed ``Bearer <token>``
    header the token is validated via ``validate_access_token`` and, when
    invalid, ``auth.error`` is set to ``'access_denied'``.

    :rtype: instance of ``self.authorization_class``.
    """
    auth = self.authorization_class()
    header = self.get_authorization_header()
    # BUG FIX: the original tested ``not header.split`` — a bound method,
    # which is always truthy — so blank/whitespace-only headers were not
    # rejected early. Test the split *result* instead.
    parts = header.split() if header else []
    if not parts:
        return auth
    if len(parts) > 1 and parts[0] == 'Bearer':
        auth.is_oauth = True
        access_token = parts[1]
        self.validate_access_token(access_token, auth)
        if not auth.is_valid:
            auth.error = 'access_denied'
    return auth
python
{ "resource": "" }
q275236
SMBus.open
test
def open(self, bus): """Open the smbus interface on the specified bus.""" # Close the device if it's already open. if self._device is not None: self.close() # Try to open the file for the specified bus. Must turn off buffering # or else Python 3 fails (see: https://bugs.python.org/issue20074) self._device = open('/dev/i2c-{0}'.format(bus), 'r+b', buffering=0)
python
{ "resource": "" }
q275237
SMBus.read_byte
test
def read_byte(self, addr):
    """Read one byte from the device at *addr* and return it as an int."""
    assert self._device is not None, 'Bus must be opened before operations are made against it!'
    self._select_device(addr)
    raw = self._device.read(1)
    return ord(raw)
python
{ "resource": "" }
q275238
SMBus.read_bytes
test
def read_bytes(self, addr, number):
    """Read *number* raw bytes from the device at *addr*."""
    assert self._device is not None, 'Bus must be opened before operations are made against it!'
    self._select_device(addr)
    data = self._device.read(number)
    return data
python
{ "resource": "" }
q275239
SMBus.read_byte_data
test
def read_byte_data(self, addr, cmd):
    """Read a single byte from the specified cmd register of the device.

    Args:
        addr (int): I2C device address.
        cmd (int): Register number to read from.

    Returns:
        int: The byte read from the register.
    """
    assert self._device is not None, 'Bus must be opened before operations are made against it!'
    # Build ctypes values to marshall between ioctl and Python.
    reg = c_uint8(cmd)
    result = c_uint8()
    # Build ioctl request: a combined write-then-read transaction so the
    # register select and the read happen atomically on the bus.
    request = make_i2c_rdwr_data([
        (addr, 0, 1, pointer(reg)),             # Write cmd register.
        (addr, I2C_M_RD, 1, pointer(result))    # Read 1 byte as result.
    ])
    # Make ioctl call and return result data.
    ioctl(self._device.fileno(), I2C_RDWR, request)
    return result.value
python
{ "resource": "" }
q275240
SMBus.write_bytes
test
def write_bytes(self, addr, buf):
    """Write many bytes to the specified device.

    ``buf`` is a bytearray (or bytes) written verbatim to the device.
    """
    assert self._device is not None, 'Bus must be opened before operations are made against it!'
    self._select_device(addr)
    self._device.write(buf)
python
{ "resource": "" }
q275241
SMBus.write_byte_data
test
def write_byte_data(self, addr, cmd, val):
    """Write a byte of data to the specified cmd register of the device.

    Both ``cmd`` and ``val`` are masked to 8 bits before sending.
    """
    assert self._device is not None, 'Bus must be opened before operations are made against it!'
    # Construct a string of data to send with the command register and byte value.
    data = bytearray(2)
    data[0] = cmd & 0xFF
    data[1] = val & 0xFF
    # Send the data to the device.
    self._select_device(addr)
    self._device.write(data)
python
{ "resource": "" }
q275242
SMBus.write_i2c_block_data
test
def write_i2c_block_data(self, addr, cmd, vals):
    """Write a buffer of data to the specified cmd register of the device.

    The command byte and the payload are packed into one buffer so the
    whole write happens in a single transaction.
    """
    assert self._device is not None, 'Bus must be opened before operations are made against it!'
    # Construct a string of data to send, including room for the command register.
    data = bytearray(len(vals)+1)
    data[0] = cmd & 0xFF  # Command register at the start.
    data[1:] = vals[0:]   # Copy in the block data (ugly but necessary to ensure
                          # the entire write happens in one transaction).
    # Send the data to the device.
    self._select_device(addr)
    self._device.write(data)
python
{ "resource": "" }
q275243
File.cdn_url
test
def cdn_url(self):
    """CDN URL of the file, with ``default_effects`` applied when set.

    Usage example::

        >>> file_ = File('a771f854-c2cb-408a-8c36-71af77811f3b')
        >>> file_.cdn_url
        https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/

    You can set default effects::

        >>> file_.default_effects = 'effect/flip/-/effect/mirror/'
        >>> file_.cdn_url
        https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/-/effect/flip/-/effect/mirror/

    """
    path = self.cdn_path(self.default_effects)
    return '%s%s' % (conf.cdn_base, path)
python
{ "resource": "" }
q275244
File.copy
test
def copy(self, effects=None, target=None):
    """Creates a File Copy on Uploadcare or Custom Storage.

    .. deprecated::
        Will be removed in 4.0.0.  Use ``create_local_copy`` and
        ``create_remote_copy`` instead.

    Args:
        - effects: Adds CDN image effects. If ``self.default_effects``
          property is set effects will be combined with default effects.
        - target: Name of a custom storage connected to your project.
          Uploadcare storage is used if target is absent.
    """
    warning = """File.copy method is deprecated and will be removed in 4.0.0. Please use `create_local_copy` and `create_remote_copy` instead. """
    # Fix: ``Logger.warn`` is a deprecated alias of ``Logger.warning``.
    logger.warning('API Warning: {0}'.format(warning))
    if target is not None:
        return self.create_remote_copy(target, effects)
    else:
        return self.create_local_copy(effects)
python
{ "resource": "" }
q275245
File.create_local_copy
test
def create_local_copy(self, effects=None, store=None):
    """Creates a Local File Copy on Uploadcare Storage.

    Args:
        - effects: Adds CDN image effects. If ``self.default_effects``
          property is set effects will be combined with default effects.
        - store: If ``store`` option is set to False the copy of your file
          will be deleted in 24 hour period after the upload.
          Works only if `autostore` is enabled in the project.
    """
    effects = self._build_effects(effects)
    # NOTE(review): ``store=False`` is coerced to '' and therefore omitted
    # from the request entirely -- presumably the API default applies then;
    # confirm this matches the documented 24-hour deletion behaviour.
    store = store or ''
    data = {
        'source': self.cdn_path(effects)
    }
    if store:
        data['store'] = store
    return rest_request('POST', 'files/', data=data)
python
{ "resource": "" }
q275246
File.create_remote_copy
test
def create_remote_copy(self, target, effects=None, make_public=None,
                       pattern=None):
    """Creates file copy in remote storage.

    Args:
        - target: Name of a custom storage connected to the project.
        - effects: Adds CDN image effects to ``self.default_effects``
          if any.
        - make_public: To forbid public from accessing your files on the
          storage set ``make_public`` option to be False.
          Default value is None. Files have public access by default.
        - pattern: Specify ``pattern`` option to set S3 object key name.
          Takes precedence over pattern set in project settings.
          If neither is specified defaults to
          `${uuid}/${filename}${effects}${ext}`
          (TODO confirm: the middle variable was garbled in the original
          docstring; verify against the REST API docs).

    For more information on each of the options above please refer to
    REST API docs https://uploadcare.com/docs/api_reference/rest/accessing_files/.

    Following example copies a file to custom storage named ``samplefs``:

         >>> file = File('e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a')
         >>> file.create_remote_copy(target='samplefs',
         >>>                         make_public=True,
         >>>                         pattern='${uuid}/${filename}${ext}')

    Now custom storage ``samplefs`` contains publicly available file
    with original filename billmurray.jpg in
    in the directory named ``e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a``.
    """
    effects = self._build_effects(effects)
    data = {
        'source': self.cdn_path(effects),
        'target': target
    }
    if make_public is not None:
        data['make_public'] = make_public
    if pattern is not None:
        data['pattern'] = pattern
    return rest_request('POST', 'files/', data=data)
python
{ "resource": "" }
q275247
File.construct_from
test
def construct_from(cls, file_info):
    """Build a ``File`` instance from an API file-info dict.

    For example you have result of
    ``/files/1921953c-5d94-4e47-ba36-c2e1dd165e1a/`` API request::

        >>> file_info = {
                # ...
                'uuid': '1921953c-5d94-4e47-ba36-c2e1dd165e1a',
                # ...
            }
        >>> File.construct_from(file_info)
        <uploadcare.File 1921953c-5d94-4e47-ba36-c2e1dd165e1a>

    """
    instance = cls(file_info['uuid'])
    instance.default_effects = file_info.get('default_effects')
    instance._info_cache = file_info
    return instance
python
{ "resource": "" }
q275248
File.upload
test
def upload(cls, file_obj, store=None):
    """Uploads a file and returns ``File`` instance.

    Args:
        - file_obj: file object to upload to
        - store (Optional[bool]): Should the file be automatically stored
          upon upload. Defaults to None.
            - False - do not store file
            - True - store file (can result in error if autostore
                     is disabled for project)
            - None - use project settings

    Returns:
        ``File`` instance

    """
    # Map the tri-state ``store`` flag onto the API's string values.
    if store is None:
        store = 'auto'
    else:
        store = '1' if store else '0'
    payload = {
        'UPLOADCARE_STORE': store,
    }
    uploaded = uploading_request('POST', 'base/', data=payload,
                                 files={'file': file_obj})
    return cls(uploaded['file'])
python
{ "resource": "" }
q275249
File.upload_from_url
test
def upload_from_url(cls, url, store=None, filename=None):
    """Uploads file from given url and returns ``FileFromUrl`` instance.

    Args:
        - url (str): URL of file to upload to
        - store (Optional[bool]): Should the file be automatically stored
          upon upload. Defaults to None.
            - False - do not store file
            - True - store file (can result in error if autostore
                     is disabled for project)
            - None - use project settings
        - filename (Optional[str]): Name of the uploaded file. If this not
          specified the filename will be obtained from response headers
          or source URL. Defaults to None.

    Returns:
        ``FileFromUrl`` instance

    Raises:
        ``APIError`` when the upload response carries no token.
    """
    # Map the tri-state ``store`` flag onto the API's string values.
    store = 'auto' if store is None else ('1' if store else '0')
    payload = {
        'source_url': url,
        'store': store,
    }
    if filename:
        payload['filename'] = filename
    result = uploading_request('POST', 'from_url/', data=payload)
    if 'token' not in result:
        raise APIError(
            'could not find token in result: {0}'.format(result)
        )
    return cls.FileFromUrl(result['token'])
python
{ "resource": "" }
q275250
File.upload_from_url_sync
test
def upload_from_url_sync(cls, url, timeout=30, interval=0.3,
                         until_ready=False, store=None, filename=None):
    """Uploads file from given url and blocks until it completes.

    Args:
        - url (str): URL of file to upload to
        - store (Optional[bool]): Should the file be automatically stored
          upon upload. Defaults to None.
            - False - do not store file
            - True - store file (can result in error if autostore
                     is disabled for project)
            - None - use project settings
        - filename (Optional[str]): Name of the uploaded file. If this not
          specified the filename will be obtained from response headers
          or source URL. Defaults to None.
        - timeout (Optional[int]): seconds to wait for successful upload.
          Defaults to 30.
        - interval (Optional[float]): interval between upload status checks.
          Defaults to 0.3.
        - until_ready (Optional[bool]): should we wait until file is
          available via CDN. Defaults to False.

    Returns:
        ``File`` instance

    Raises:
        ``TimeoutError`` if file wasn't uploaded in time
    """
    pending = cls.upload_from_url(url, store, filename)
    return pending.wait(
        timeout=timeout,
        interval=interval,
        until_ready=until_ready,
    )
python
{ "resource": "" }
q275251
FileGroup.file_cdn_urls
test
def file_cdn_urls(self):
    """CDN urls of all files from group, built without any API request.

    Usage example::

        >>> file_group = FileGroup('0513dda0-582f-447d-846f-096e5df9e2bb~2')
        >>> file_group.file_cdn_urls[0]
        'https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/nth/0/'

    """
    return [
        '{group_cdn_url}nth/{file_index}/'.format(
            group_cdn_url=self.cdn_url,
            file_index=position,
        )
        for position in six.moves.xrange(len(self))
    ]
python
{ "resource": "" }
q275252
FileGroup.construct_from
test
def construct_from(cls, group_info):
    """Build a ``FileGroup`` instance from an API group-info dict."""
    instance = cls(group_info['id'])
    instance._info_cache = group_info
    return instance
python
{ "resource": "" }
q275253
FileGroup.create
test
def create(cls, files):
    """Creates file group and returns ``FileGroup`` instance.

    It expects iterable object that contains ``File`` instances, e.g.::

        >>> file_1 = File('6c5e9526-b0fe-4739-8975-72e8d5ee6342')
        >>> file_2 = File('a771f854-c2cb-408a-8c36-71af77811f3b')
        >>> FileGroup.create((file_1, file_2))
        <uploadcare.FileGroup 0513dda0-6666-447d-846f-096e5df9e2bb~2>

    Raises ``InvalidParamError`` when any item is not a ``File`` or when
    the iterable is empty.
    """
    data = {}
    for index, file_ in enumerate(files):
        if isinstance(file_, File):
            # API expects files keyed as files[0], files[1], ...
            file_index = 'files[{index}]'.format(index=index)
            data[file_index] = six.text_type(file_)
        else:
            raise InvalidParamError(
                'all items have to be ``File`` instance'
            )
    if not data:
        raise InvalidParamError('set of files is empty')
    group_info = uploading_request('POST', 'group/', data=data)
    group = cls.construct_from(group_info)
    return group
python
{ "resource": "" }
q275254
FilesStorage._base_opration
test
def _base_opration(self, method):
    """ Base method for storage operations.

    Streams the uuids in chunks of ``self.chunk_size`` and issues one
    REST request per chunk until the generator is exhausted.
    """
    # NOTE(review): the name keeps the historical typo ("opration")
    # because callers elsewhere in this class refer to it by that name.
    uuids = self.uuids()
    while True:
        chunk = list(islice(uuids, 0, self.chunk_size))
        if not chunk:
            return
        rest_request(method, self.storage_url, chunk)
python
{ "resource": "" }
q275255
FilesStorage.uuids
test
def uuids(self):
    """ Extract uuid from each item of specified ``seq``.

    Accepts ``File`` instances (their ``uuid`` is yielded) or raw uuid
    strings (yielded as-is); anything else raises ``ValueError``.
    """
    for f in self._seq:
        if isinstance(f, File):
            yield f.uuid
        elif isinstance(f, six.string_types):
            yield f
        else:
            raise ValueError(
                'Invalid type for sequence item: {0}'.format(type(f)))
python
{ "resource": "" }
q275256
_list
test
def _list(api_list_class, arg_namespace, **extra):
    """ A common function for building methods of the "list showing".

    Parses ``starting_point`` into a datetime when ordering by a datetime
    field (or no ordering), then instantiates ``api_list_class`` and
    pretty-prints the resulting items.
    """
    if arg_namespace.starting_point:
        ordering_field = (arg_namespace.ordering or '').lstrip('-')
        # Only datetime-based orderings take a datetime starting point.
        if ordering_field in ('', 'datetime_uploaded', 'datetime_created'):
            arg_namespace.starting_point = parser.parse(
                arg_namespace.starting_point)
    items = api_list_class(
        starting_point=arg_namespace.starting_point,
        ordering=arg_namespace.ordering,
        limit=arg_namespace.limit,
        request_limit=arg_namespace.request_limit,
        **extra
    )
    # Identity constructor: show raw API dicts instead of wrapped objects.
    items.constructor = lambda x: x
    try:
        pprint(list(items))
    except ValueError as e:
        print(e)
python
{ "resource": "" }
q275257
bar
test
def bar(iter_content, parts, title=''):
    """Yield items from ``iter_content`` while drawing a progress bar.

    ``parts`` is the expected number of items; each yielded item advances
    the 10-cell bar written to stdout.  The bar is forced to 100% once
    the iterable is exhausted.
    """
    parts = max(float(parts), 1.0)
    cells = 10
    step = cells / parts
    progress = 0

    def draw(progress):
        sys.stdout.write(
            '\r[{0:10}] {1:.2f}% {2}'.format(
                '#'*int(progress), progress * cells, title))

    for chunk in iter_content:
        yield chunk
        progress += step
        draw(progress)
        sys.stdout.flush()
    draw(cells)
    print('')
python
{ "resource": "" }
q275258
uploading_request
test
def uploading_request(verb, path, data=None, files=None, timeout=conf.DEFAULT):
    """Makes Uploading API request and returns response as ``dict``.

    It takes settings from ``conf`` module.

    Make sure that given ``path`` does not contain leading slash.

    Usage example::

        >>> file_obj = open('photo.jpg', 'rb')
        >>> uploading_request('POST', 'base/', files={'file': file_obj})
        {
            'file': '9b9f4483-77b8-40ae-a198-272ba6280004'
        }
        >>> File('9b9f4483-77b8-40ae-a198-272ba6280004')

    Raises:
        - ``APIConnectionError`` on transport failures,
        - ``InvalidRequestError`` on HTTP 400/404,
        - ``APIError`` on malformed JSON or any other status code.
    """
    path = path.lstrip('/')
    url = urljoin(conf.upload_base, path)
    if data is None:
        data = {}
    # The public key is sent under both names the upload API accepts.
    data['pub_key'] = conf.pub_key
    data['UPLOADCARE_PUB_KEY'] = conf.pub_key
    headers = {
        'User-Agent': _build_user_agent(),
    }
    try:
        response = session.request(
            str(verb), url, allow_redirects=True,
            verify=conf.verify_upload_ssl,
            data=data, files=files, headers=headers,
            timeout=_get_timeout(timeout),
        )
    except requests.RequestException as exc:
        raise APIConnectionError(exc.args[0])
    # No content.
    if response.status_code == 204:
        return {}
    if 200 <= response.status_code < 300:
        if _content_type_from_response(response).endswith(('/json', '+json')):
            try:
                return response.json()
            except ValueError as exc:
                raise APIError(exc.args[0])
    if response.status_code in (400, 404):
        raise InvalidRequestError(response.content)
    # Not json or unknown status code.
    raise APIError(response.content)
python
{ "resource": "" }
q275259
Api.home_mode_status
test
def home_mode_status(self, **kwargs):
    """Return whether Surveillance Station's Home Mode is enabled.

    Extra keyword arguments are merged into the request payload.
    """
    api = self._api_info['home_mode']
    payload = {
        'api': api['name'],
        'method': 'GetInfo',
        'version': api['version'],
        '_sid': self._sid,
    }
    payload.update(kwargs)
    response = self._get_json_with_retry(api['url'], payload)
    return response['data']['on']
python
{ "resource": "" }
q275260
Api.camera_list
test
def camera_list(self, **kwargs):
    """Return a list of ``Camera`` objects known to Surveillance Station.

    Extra keyword arguments are merged into the request payload.
    """
    api = self._api_info['camera']
    payload = dict({
        '_sid': self._sid,
        'api': api['name'],
        'method': 'List',
        'version': api['version'],
    }, **kwargs)
    response = self._get_json_with_retry(api['url'], payload)
    cameras = []
    for data in response['data']['cameras']:
        cameras.append(Camera(data, self._video_stream_url))
    return cameras
python
{ "resource": "" }
q275261
Api.camera_info
test
def camera_info(self, camera_ids, **kwargs):
    """Return a list of ``Camera`` objects matching ``camera_ids``.

    ``camera_ids`` is an iterable of ids; extra keyword arguments are
    merged into the request payload.
    """
    api = self._api_info['camera']
    payload = dict({
        '_sid': self._sid,
        'api': api['name'],
        'method': 'GetInfo',
        'version': api['version'],
        # NOTE: the generator variable shadows the ``id`` builtin;
        # harmless here, but worth renaming if this line is touched.
        'cameraIds': ', '.join(str(id) for id in camera_ids),
    }, **kwargs)
    response = self._get_json_with_retry(api['url'], payload)
    cameras = []
    for data in response['data']['cameras']:
        cameras.append(Camera(data, self._video_stream_url))
    return cameras
python
{ "resource": "" }
q275262
Api.camera_snapshot
test
def camera_snapshot(self, camera_id, **kwargs):
    """Return the raw bytes of a snapshot from camera ``camera_id``."""
    api = self._api_info['camera']
    payload = {
        '_sid': self._sid,
        'api': api['name'],
        'method': 'GetSnapshot',
        'version': api['version'],
        'cameraId': camera_id,
    }
    payload.update(kwargs)
    response = self._get(api['url'], payload)
    return response.content
python
{ "resource": "" }
q275263
Api.camera_disable
test
def camera_disable(self, camera_id, **kwargs):
    """Disable a camera and return the API ``success`` flag.

    Fixes two defects in the original: leftover debug ``print`` calls,
    and indexing the raw HTTP response object -- ``self._get`` returns a
    response, not a dict, so ``response['success']`` would raise.  The
    parsed-JSON helper used by the other API methods is used instead.
    """
    api = self._api_info['camera']
    payload = dict({
        '_sid': self._sid,
        'api': api['name'],
        'method': 'Disable',
        # NOTE(review): version is hard-coded to 9 rather than taken from
        # the discovered API info; confirm this is intentional.
        'version': 9,
        'idList': camera_id,
    }, **kwargs)
    response = self._get_json_with_retry(api['url'], payload)
    return response['success']
python
{ "resource": "" }
q275264
Api.camera_event_motion_enum
test
def camera_event_motion_enum(self, camera_id, **kwargs):
    """Return the ``MotionSetting`` for the camera with ``camera_id``.

    Extra keyword arguments are merged into the request payload.
    """
    api = self._api_info['camera_event']
    payload = dict({
        '_sid': self._sid,
        'api': api['name'],
        'method': 'MotionEnum',
        'version': api['version'],
        'camId': camera_id,
    }, **kwargs)
    response = self._get_json_with_retry(api['url'], payload)
    return MotionSetting(camera_id, response['data']['MDParam'])
python
{ "resource": "" }
q275265
Api.camera_event_md_param_save
test
def camera_event_md_param_save(self, camera_id, **kwargs):
    """Save motion-detection parameters for ``camera_id``.

    Keyword arguments are merged into the request payload; the camera id
    echoed back by the API is returned.
    """
    api = self._api_info['camera_event']
    payload = {
        '_sid': self._sid,
        'api': api['name'],
        'method': 'MDParamSave',
        'version': api['version'],
        'camId': camera_id,
    }
    payload.update(kwargs)
    response = self._get_json_with_retry(api['url'], payload)
    return response['data']['camId']
python
{ "resource": "" }
q275266
SurveillanceStation.update
test
def update(self):
    """Refresh cached cameras and their motion settings from the API.

    Idiom fix: the original wrapped both dict comprehensions in a
    pointless ``enumerate`` whose index was discarded.
    """
    cameras = self._api.camera_list()
    self._cameras_by_id = {camera.camera_id: camera for camera in cameras}
    # Fetch the motion settings for every known camera.
    motion_settings = [
        self._api.camera_event_motion_enum(camera_id)
        for camera_id in self._cameras_by_id
    ]
    self._motion_settings_by_id = {
        setting.camera_id: setting for setting in motion_settings}
python
{ "resource": "" }
q275267
is_last_li
test
def is_last_li(li, meta_data, current_numId):
    """ Determine if ``li`` is the last list item for a given list.

    Walks forward through following siblings; the first list item found
    decides the answer (different numId means the list ended here).
    """
    if not is_li(li, meta_data):
        return False
    w_namespace = get_namespace(li, 'w')
    next_el = li
    while True:
        # If we run out of elements this must be the last list item.
        if next_el is None:
            return True
        next_el = next_el.getnext()
        # Ignore elements that are not a list item.
        if not is_li(next_el, meta_data):
            continue
        new_numId = get_numId(next_el, w_namespace)
        if current_numId != new_numId:
            return True
        # If we have gotten here then we have found another list item in the
        # current list, so ``li`` is not the last li in the list.
        return False
python
{ "resource": "" }
q275268
get_single_list_nodes_data
test
def get_single_list_nodes_data(li, meta_data):
    """
    Find consecutive li tags that have content that have the same list id.

    Yields ``li`` itself, then each following sibling belonging to the
    same list (including interleaved non-list content), stopping at the
    list's end, at a heading, or at a shallower indentation level.
    """
    yield li
    w_namespace = get_namespace(li, 'w')
    current_numId = get_numId(li, w_namespace)
    starting_ilvl = get_ilvl(li, w_namespace)
    el = li
    while True:
        el = el.getnext()
        if el is None:
            break
        # If the tag has no content ignore it.
        if not has_text(el):
            continue
        # Stop the lists if you come across a list item that should be a
        # heading.
        if _is_top_level_upper_roman(el, meta_data):
            break
        if (
                is_li(el, meta_data) and
                (starting_ilvl > get_ilvl(el, w_namespace))):
            break
        new_numId = get_numId(el, w_namespace)
        if new_numId is None or new_numId == -1:
            # Not a p tag or a list item
            yield el
            continue
        # If the list id of the next tag is different that the previous that
        # means a new list being made (not nested)
        if current_numId != new_numId:
            # Not a subsequent list.
            break
        if is_last_li(el, meta_data, current_numId):
            yield el
            break
        yield el
python
{ "resource": "" }
q275269
get_ilvl
test
def get_ilvl(li, w_namespace):
    """Return the indentation level (``ilvl``) of list item ``li``.

    The ilvl tells at what level of indentation the tag sits, which is
    used to decide whether the li needs to be nested.  Returns ``-1``
    when the element carries no ilvl at all.
    """
    matches = li.xpath('.//w:ilvl', namespaces=li.nsmap)
    if not matches:
        return -1
    return int(matches[0].get('%sval' % w_namespace))
python
{ "resource": "" }
q275270
get_v_merge
test
def get_v_merge(tc):
    """Return the single ``vMerge`` element of table cell ``tc``, or None.

    vMerge is what docx uses to denote that a table cell is part of a
    rowspan: the first cell carries val='restart', continuation cells
    carry anything else.  None is returned for a missing cell or when
    the cell does not contain exactly one vMerge.
    """
    if tc is None:
        return None
    matches = tc.xpath('.//w:vMerge', namespaces=tc.nsmap)
    if len(matches) != 1:
        return None
    return matches[0]
python
{ "resource": "" }
q275271
get_grid_span
test
def get_grid_span(tc):
    """Return the colspan of table cell ``tc``.

    gridSpan is what docx uses to denote a colspan; it maps one-to-one.
    Cells without exactly one gridSpan get the default span of 1.
    """
    w_namespace = get_namespace(tc, 'w')
    matches = tc.xpath('.//w:gridSpan', namespaces=tc.nsmap)
    if len(matches) != 1:
        return 1
    return int(matches[0].get('%sval' % w_namespace))
python
{ "resource": "" }
q275272
get_td_at_index
test
def get_td_at_index(tr, index):
    """Return the td of row ``tr`` at logical column ``index``.

    Used when computing rowspans: colspans are taken into account, so
    the logical column advances by each cell's gridSpan.  Returns None
    implicitly when the index lies past the row's cells.
    """
    position = 0
    for cell in tr.xpath('.//w:tc', namespaces=tr.nsmap):
        if position == index:
            return cell
        position += get_grid_span(cell)
python
{ "resource": "" }
q275273
style_is_false
test
def style_is_false(style):
    """ For bold, italics and underline. Simply checking to see if the
    various tags are present will not suffice. If the tag is present and
    set to False then the style should not be present.
    """
    # A missing element means the style is not applied at all.
    if style is None:
        return False
    w_namespace = get_namespace(style, 'w')
    # NOTE(review): despite the name, this returns True when the style is
    # *not* explicitly 'false' (i.e. the style IS in effect).
    return style.get('%sval' % w_namespace) != 'false'
python
{ "resource": "" }
q275274
is_bold
test
def is_bold(r):
    """Return True when the text run ``r`` is considered bold."""
    w_namespace = get_namespace(r, 'w')
    run_props = r.find('%srPr' % w_namespace)
    if run_props is None:
        return False
    return style_is_false(run_props.find('%sb' % w_namespace))
python
{ "resource": "" }
q275275
is_italics
test
def is_italics(r):
    """Return True when the text run ``r`` is considered italicized."""
    w_namespace = get_namespace(r, 'w')
    run_props = r.find('%srPr' % w_namespace)
    if run_props is None:
        return False
    return style_is_false(run_props.find('%si' % w_namespace))
python
{ "resource": "" }
q275276
is_underlined
test
def is_underlined(r):
    """Return True when the text run ``r`` is considered underlined."""
    w_namespace = get_namespace(r, 'w')
    run_props = r.find('%srPr' % w_namespace)
    if run_props is None:
        return False
    return style_is_false(run_props.find('%su' % w_namespace))
python
{ "resource": "" }
q275277
is_title
test
def is_title(p):
    """Return True when paragraph ``p`` carries the ``Title`` style."""
    w_namespace = get_namespace(p, 'w')
    matches = p.xpath('.//w:pStyle', namespaces=p.nsmap)
    if not matches:
        return False
    return matches[0].get('%sval' % w_namespace) == 'Title'
python
{ "resource": "" }
q275278
get_text_run_content_data
test
def get_text_run_content_data(r):
    """Yield the t/drawing/pict/br children of run ``r`` in document order.

    Runs can contain both text (``t``) and drawing tags; callers need
    both, in the order in which they appear.
    """
    w_namespace = get_namespace(r, 'w')
    wanted = (
        '%st' % w_namespace,
        '%sdrawing' % w_namespace,
        '%spict' % w_namespace,
        '%sbr' % w_namespace,
    )
    for child in r:
        if child.tag in wanted:
            yield child
python
{ "resource": "" }
q275279
get_relationship_info
test
def get_relationship_info(tree, media, image_sizes):
    """
    There is a separate file holds the targets to links as well as the
    targets for images. Return a dictionary based on the relationship id
    and the target.
    """
    if tree is None:
        return {}
    result = {}
    # Loop through each relationship.
    for el in tree.iter():
        el_id = el.get('Id')
        if el_id is None:
            continue
        # Store the target in the result dict.
        target = el.get('Target')
        if any(
                target.lower().endswith(ext) for
                ext in IMAGE_EXTENSIONS_TO_SKIP):
            continue
        if target in media:
            image_size = image_sizes.get(el_id)
            target = convert_image(media[target], image_size)
        # cgi will replace things like & < > with &amp; &lt; &gt;
        # NOTE(review): cgi.escape was removed in Python 3.8; migrate to
        # html.escape(target, quote=False) once Python 2 support is dropped.
        result[el_id] = cgi.escape(target)
    return result
python
{ "resource": "" }
q275280
_get_document_data
test
def _get_document_data(f, image_handler=None):
    '''
    ``f`` is a ``ZipFile`` that is open.
    Extract out the document data, numbering data and the relationship
    data, returning ``(document_xml, meta_data)``.  ``f`` is closed as a
    side effect, and media files are extracted next to the zip.
    '''
    if image_handler is None:
        # Default handler: resolve an image id straight from the
        # relationship dict.
        def image_handler(image_id, relationship_dict):
            return relationship_dict.get(image_id)
    document_xml = None
    numbering_xml = None
    relationship_xml = None
    styles_xml = None
    parser = etree.XMLParser(strip_cdata=False)
    path, _ = os.path.split(f.filename)
    media = {}
    image_sizes = {}
    # Loop through the files in the zip file.
    for item in f.infolist():
        # This file holds all the content of the document.
        if item.filename == 'word/document.xml':
            xml = f.read(item.filename)
            document_xml = etree.fromstring(xml, parser)
        # This file tells document.xml how lists should look.
        elif item.filename == 'word/numbering.xml':
            xml = f.read(item.filename)
            numbering_xml = etree.fromstring(xml, parser)
        elif item.filename == 'word/styles.xml':
            xml = f.read(item.filename)
            styles_xml = etree.fromstring(xml, parser)
        # This file holds the targets for hyperlinks and images.
        elif item.filename == 'word/_rels/document.xml.rels':
            xml = f.read(item.filename)
            try:
                relationship_xml = etree.fromstring(xml, parser)
            except XMLSyntaxError:
                # Fall back to an empty tree on malformed rels.
                relationship_xml = etree.fromstring('<xml></xml>', parser)
        if item.filename.startswith('word/media/'):
            # Strip off the leading word/
            media[item.filename[len('word/'):]] = f.extract(
                item.filename,
                path,
            )
    # Close the file pointer.
    f.close()
    # Get dictionaries for the numbering and the relationships.
    numbering_dict = get_numbering_info(numbering_xml)
    image_sizes = get_image_sizes(document_xml)
    relationship_dict = get_relationship_info(
        relationship_xml,
        media,
        image_sizes
    )
    styles_dict = get_style_dict(styles_xml)
    font_sizes_dict = defaultdict(int)
    if DETECT_FONT_SIZE:
        font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
    meta_data = MetaData(
        numbering_dict=numbering_dict,
        relationship_dict=relationship_dict,
        styles_dict=styles_dict,
        font_sizes_dict=font_sizes_dict,
        image_handler=image_handler,
        image_sizes=image_sizes,
    )
    return document_xml, meta_data
python
{ "resource": "" }
q275281
get_ordered_list_type
test
def get_ordered_list_type(meta_data, numId, ilvl):
    """Return the numbering style for an ordered list level.

    Falls back to ``DEFAULT_LIST_NUMBERING_STYLE`` when ``numId`` or
    ``ilvl`` is not present in the numbering dict.  This function only
    cares about ordered lists; unordered lists are dealt with elsewhere.
    """
    numbering_dict = meta_data.numbering_dict
    # Early return if numId or ilvl are not valid.
    if numId not in numbering_dict:
        return DEFAULT_LIST_NUMBERING_STYLE
    if ilvl not in numbering_dict[numId]:
        return DEFAULT_LIST_NUMBERING_STYLE
    # Consistency fix: use the local alias instead of re-reading
    # ``meta_data.numbering_dict`` as the original did.
    return numbering_dict[numId][ilvl]
python
{ "resource": "" }
q275282
build_list
test
def build_list(li_nodes, meta_data):
    """
    Build the list structure for ``li_nodes`` and return
    ``(root_list_element, visited_nodes)``.

    NOTE(review): this reconstruction restores indentation lost in the
    collapsed source; the placement of the root/current-list bookkeeping
    after the if/else was inferred from the control flow -- verify
    against upstream history before relying on it.
    """
    # Need to keep track of all incomplete nested lists.
    ol_dict = {}
    # Need to keep track of the current indentation level.
    current_ilvl = -1
    # Need to keep track of the current list id.
    current_numId = -1
    # Need to keep track of list that new li tags should be added too.
    current_ol = None
    # Store the first list created (the root list) for the return value.
    root_ol = None
    visited_nodes = []
    list_contents = []

    def _build_li(list_contents):
        # Join the accumulated content fragments into one <li>.
        data = '<br />'.join(t for t in list_contents if t is not None)
        return etree.XML('<li>%s</li>' % data)

    def _build_non_li_content(el, meta_data):
        # Convert a non-list-item element (table or paragraph) into
        # content plus the nodes it consumed.
        w_namespace = get_namespace(el, 'w')
        if el.tag == '%stbl' % w_namespace:
            new_el, visited_nodes = build_table(el, meta_data)
            return etree.tostring(new_el), visited_nodes
        elif el.tag == '%sp' % w_namespace:
            return get_element_content(el, meta_data), [el]
        if has_text(el):
            raise UnintendedTag('Did not expect %s' % el.tag)

    def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
        for i in reversed(range(ilvl, current_ilvl)):
            # Any list that is more indented that ilvl needs to
            # be merged to the list before it.
            if i not in ol_dict:
                continue
            if ol_dict[i] is not current_ol:
                # NOTE(review): the inner re-check below is redundant
                # (it can never be True inside this branch); preserved
                # byte-for-byte from the original.
                if ol_dict[i] is current_ol:
                    continue
                ol_dict[i][-1].append(current_ol)
                current_ol = ol_dict[i]
        # Clean up finished nested lists.
        for key in list(ol_dict):
            if key > ilvl:
                del ol_dict[key]
        return current_ol

    for li_node in li_nodes:
        w_namespace = get_namespace(li_node, 'w')
        if not is_li(li_node, meta_data):
            # Get the content and visited nodes
            new_el, el_visited_nodes = _build_non_li_content(
                li_node,
                meta_data,
            )
            list_contents.append(new_el)
            visited_nodes.extend(el_visited_nodes)
            continue
        if list_contents:
            li_el = _build_li(list_contents)
            list_contents = []
            current_ol.append(li_el)
        # Get the data needed to build the current list item
        list_contents.append(get_element_content(
            li_node,
            meta_data,
        ))
        ilvl = get_ilvl(li_node, w_namespace)
        numId = get_numId(li_node, w_namespace)
        list_type = get_ordered_list_type(meta_data, numId, ilvl)
        # If the ilvl is greater than the current_ilvl or the list id is
        # changing then we have the first li tag in a nested list. We need to
        # create a new list object and update all of our variables for keeping
        # track.
        if (ilvl > current_ilvl) or (numId != current_numId):
            # Only create a new list
            ol_dict[ilvl] = create_list(list_type)
            current_ol = ol_dict[ilvl]
            current_ilvl = ilvl
            current_numId = numId
        # Both cases above are not True then we need to close all lists greater
        # than ilvl and then remove them from the ol_dict
        else:
            # Merge any nested lists that need to be merged.
            current_ol = _merge_lists(
                ilvl=ilvl,
                current_ilvl=current_ilvl,
                ol_dict=ol_dict,
                current_ol=current_ol,
            )
        # Set the root list after the first list is created.
        if root_ol is None:
            root_ol = current_ol
        # Set the current list.
        if ilvl in ol_dict:
            current_ol = ol_dict[ilvl]
        else:
            # In some instances the ilvl is not in the ol_dict, if that is the
            # case, create it here (not sure how this happens but it has
            # before.) Only do this if the current_ol is not the root_ol,
            # otherwise etree will crash.
            if current_ol is not root_ol:
                # Merge the current_ol into the root_ol. _merge_lists is not
                # equipped to handle this situation since the only way to get
                # into this block of code is to have mangled ilvls.
                root_ol[-1].append(current_ol)
                # Reset the current_ol
                current_ol = create_list(list_type)
        # Create the li element.
        visited_nodes.extend(list(li_node.iter()))
    # If a list item is the last thing in a document, then you will need to add
    # it here. Should probably figure out how to get the above logic to deal
    # with it.
    if list_contents:
        li_el = _build_li(list_contents)
        list_contents = []
        current_ol.append(li_el)
    # Merge up any nested lists that have not been merged.
    current_ol = _merge_lists(
        ilvl=0,
        current_ilvl=current_ilvl,
        ol_dict=ol_dict,
        current_ol=current_ol,
    )
    return root_ol, visited_nodes
python
{ "resource": "" }
q275283
build_tr
test
def build_tr(tr, meta_data, row_spans):
    """
    This will return a single tr element, with all tds already populated.

    :param tr: a ``w:tr`` (table row) element from the docx XML tree.
    :param meta_data: parsed document metadata used by the content helpers.
    :param row_spans: an iterator yielding the rowspan value for each cell
        that starts a vertical merge (consumed in document order).
    :returns: an ``etree`` ``<tr>`` element with one ``<td>`` per visible cell.
    """
    # Create a blank tr element.
    tr_el = etree.Element('tr')
    w_namespace = get_namespace(tr, 'w')
    visited_nodes = []
    for el in tr:
        if el in visited_nodes:
            continue
        visited_nodes.append(el)
        # Find the table cells.
        if el.tag == '%stc' % w_namespace:
            v_merge = get_v_merge(el)
            # If there is a v_merge and it is not restart then this cell can be
            # ignored (it is a continuation of a vertically merged cell above;
            # the rowspan on the "restart" cell already covers it).
            if (
                    v_merge is not None and
                    v_merge.get('%sval' % w_namespace) != 'restart'):
                continue
            # Loop through each and build a list of all the content.
            texts = []
            for td_content in el:
                # Since we are doing look-a-heads in this loop we need to check
                # again to see if we have already visited the node.
                if td_content in visited_nodes:
                    continue
                # Check to see if it is a list or a regular paragraph.
                if is_li(td_content, meta_data):
                    # If it is a list, create the list and update
                    # visited_nodes.
                    li_nodes = get_single_list_nodes_data(
                        td_content,
                        meta_data,
                    )
                    list_el, list_visited_nodes = build_list(
                        li_nodes,
                        meta_data,
                    )
                    visited_nodes.extend(list_visited_nodes)
                    texts.append(etree.tostring(list_el))
                elif td_content.tag == '%stbl' % w_namespace:
                    # Nested table inside this cell: recurse.
                    table_el, table_visited_nodes = build_table(
                        td_content,
                        meta_data,
                    )
                    visited_nodes.extend(table_visited_nodes)
                    texts.append(etree.tostring(table_el))
                elif td_content.tag == '%stcPr' % w_namespace:
                    # Cell properties carry no renderable content.
                    # Do nothing
                    visited_nodes.append(td_content)
                    continue
                else:
                    text = get_element_content(
                        td_content,
                        meta_data,
                        is_td=True,
                    )
                    texts.append(text)
            # NOTE(review): etree.tostring returns bytes on lxml/py3 while
            # get_element_content appears to return str -- confirm the join
            # below never mixes the two in this code base.
            data = '<br />'.join(t for t in texts if t is not None)
            td_el = etree.XML('<td>%s</td>' % data)
            # if there is a colspan then set it here.
            colspan = get_grid_span(el)
            if colspan > 1:
                td_el.set('colspan', '%d' % colspan)
            v_merge = get_v_merge(el)
            # If this td has a v_merge and it is restart then set the rowspan
            # here.
            if (
                    v_merge is not None and
                    v_merge.get('%sval' % w_namespace) == 'restart'):
                rowspan = next(row_spans)
                td_el.set('rowspan', '%d' % rowspan)
            tr_el.append(td_el)
    return tr_el
python
{ "resource": "" }
q275284
build_table
test
def build_table(table, meta_data): """ This returns a table object with all rows and cells correctly populated. """ # Create a blank table element. table_el = etree.Element('table') w_namespace = get_namespace(table, 'w') # Get the rowspan values for cells that have a rowspan. row_spans = get_rowspan_data(table) for el in table: if el.tag == '%str' % w_namespace: # Create the tr element. tr_el = build_tr( el, meta_data, row_spans, ) # And append it to the table. table_el.append(tr_el) visited_nodes = list(table.iter()) return table_el, visited_nodes
python
{ "resource": "" }
q275285
get_t_tag_content
test
def get_t_tag_content( t, parent, remove_bold, remove_italics, meta_data): """ Generate the string data that for this particular t tag. """ if t is None or t.text is None: return '' # Need to escape the text so that we do not accidentally put in text # that is not valid XML. # cgi will replace things like & < > with &amp; &lt; &gt; text = cgi.escape(t.text) # Wrap the text with any modifiers it might have (bold, italics or # underline) el_is_bold = not remove_bold and ( is_bold(parent) or is_underlined(parent) ) el_is_italics = not remove_italics and is_italics(parent) if el_is_bold: text = '<strong>%s</strong>' % text if el_is_italics: text = '<em>%s</em>' % text return text
python
{ "resource": "" }
q275286
_strip_tag
test
def _strip_tag(tree, tag): """ Remove all tags that have the tag name ``tag`` """ for el in tree.iter(): if el.tag == tag: el.getparent().remove(el)
python
{ "resource": "" }
q275287
find
test
def find(dataset, url): '''Find the location of a dataset on disk, downloading if needed.''' fn = os.path.join(DATASETS, dataset) dn = os.path.dirname(fn) if not os.path.exists(dn): print('creating dataset directory: %s', dn) os.makedirs(dn) if not os.path.exists(fn): if sys.version_info < (3, ): urllib.urlretrieve(url, fn) else: urllib.request.urlretrieve(url, fn) return fn
python
{ "resource": "" }
q275288
load_mnist
test
def load_mnist(flatten=True, labels=False): '''Load the MNIST digits dataset.''' fn = find('mnist.pkl.gz', 'http://deeplearning.net/data/mnist/mnist.pkl.gz') h = gzip.open(fn, 'rb') if sys.version_info < (3, ): (timg, tlab), (vimg, vlab), (simg, slab) = pickle.load(h) else: (timg, tlab), (vimg, vlab), (simg, slab) = pickle.load(h, encoding='bytes') h.close() if not flatten: timg = timg.reshape((-1, 28, 28, 1)) vimg = vimg.reshape((-1, 28, 28, 1)) simg = simg.reshape((-1, 28, 28, 1)) if labels: return ((timg, tlab.astype('i')), (vimg, vlab.astype('i')), (simg, slab.astype('i'))) return (timg, ), (vimg, ), (simg, )
python
{ "resource": "" }
q275289
load_cifar
test
def load_cifar(flatten=True, labels=False): '''Load the CIFAR10 image dataset.''' def extract(name): print('extracting data from {}'.format(name)) h = tar.extractfile(name) if sys.version_info < (3, ): d = pickle.load(h) else: d = pickle.load(h, encoding='bytes') for k in list(d): d[k.decode('utf8')] = d[k] h.close() img = d['data'].reshape( (-1, 3, 32, 32)).transpose((0, 2, 3, 1)).astype('f') / 128 - 1 if flatten: img = img.reshape((-1, 32 * 32 * 3)) d['data'] = img return d fn = find('cifar10.tar.gz', 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz') tar = tarfile.open(fn) imgs = [] labs = [] for i in range(1, 6): d = extract('cifar-10-batches-py/data_batch_{}'.format(i)) imgs.extend(d['data']) labs.extend(d['labels']) timg = np.asarray(imgs[:40000]) tlab = np.asarray(labs[:40000], 'i') vimg = np.asarray(imgs[40000:]) vlab = np.asarray(labs[40000:], 'i') d = extract('cifar-10-batches-py/test_batch') simg = d['data'] slab = d['labels'] tar.close() if labels: return (timg, tlab), (vimg, vlab), (simg, slab) return (timg, ), (vimg, ), (simg, )
python
{ "resource": "" }
q275290
plot_images
test
def plot_images(imgs, loc, title=None, channels=1): '''Plot an array of images. We assume that we are given a matrix of data whose shape is (n*n, s*s*c) -- that is, there are n^2 images along the first axis of the array, and each image is c squares measuring s pixels on a side. Each row of the input will be plotted as a sub-region within a single image array containing an n x n grid of images. ''' n = int(np.sqrt(len(imgs))) assert n * n == len(imgs), 'images array must contain a square number of rows!' s = int(np.sqrt(len(imgs[0]) / channels)) assert s * s == len(imgs[0]) / channels, 'images must be square!' img = np.zeros(((s+1) * n - 1, (s+1) * n - 1, channels), dtype=imgs[0].dtype) for i, pix in enumerate(imgs): r, c = divmod(i, n) img[r * (s+1):(r+1) * (s+1) - 1, c * (s+1):(c+1) * (s+1) - 1] = pix.reshape((s, s, channels)) img -= img.min() img /= img.max() ax = plt.gcf().add_subplot(loc) ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) ax.set_frame_on(False) ax.imshow(img.squeeze(), cmap=plt.cm.gray) if title: ax.set_title(title)
python
{ "resource": "" }
q275291
plot_layers
test
def plot_layers(weights, tied_weights=False, channels=1): '''Create a plot of weights, visualized as "bottom-level" pixel arrays.''' if hasattr(weights[0], 'get_value'): weights = [w.get_value() for w in weights] k = min(len(weights), 9) imgs = np.eye(weights[0].shape[0]) for i, weight in enumerate(weights[:-1]): imgs = np.dot(weight.T, imgs) plot_images(imgs, 100 + 10 * k + i + 1, channels=channels, title='Layer {}'.format(i+1)) weight = weights[-1] n = weight.shape[1] / channels if int(np.sqrt(n)) ** 2 != n: return if tied_weights: imgs = np.dot(weight.T, imgs) plot_images(imgs, 100 + 10 * k + k, channels=channels, title='Layer {}'.format(k)) else: plot_images(weight, 100 + 10 * k + k, channels=channels, title='Decoding weights')
python
{ "resource": "" }
q275292
plot_filters
test
def plot_filters(filters): '''Create a plot of conv filters, visualized as pixel arrays.''' imgs = filters.get_value() N, channels, x, y = imgs.shape n = int(np.sqrt(N)) assert n * n == N, 'filters must contain a square number of rows!' assert channels == 1 or channels == 3, 'can only plot grayscale or rgb filters!' img = np.zeros(((y+1) * n - 1, (x+1) * n - 1, channels), dtype=imgs[0].dtype) for i, pix in enumerate(imgs): r, c = divmod(i, n) img[r * (y+1):(r+1) * (y+1) - 1, c * (x+1):(c+1) * (x+1) - 1] = pix.transpose((1, 2, 0)) img -= img.min() img /= img.max() ax = plt.gcf().add_subplot(111) ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) ax.set_frame_on(False) ax.imshow(img.squeeze(), cmap=plt.cm.gray)
python
{ "resource": "" }
q275293
batches
test
def batches(arrays, steps=100, batch_size=64, rng=None): '''Create a callable that generates samples from a dataset. Parameters ---------- arrays : list of ndarray (time-steps, data-dimensions) Arrays of data. Rows in these arrays are assumed to correspond to time steps, and columns to variables. Multiple arrays can be given; in such a case, these arrays usually correspond to [input, output]---for example, for a recurrent regression problem---or [input, output, weights]---for a weighted regression or classification problem. steps : int, optional Generate samples of this many time steps. Defaults to 100. batch_size : int, optional Generate this many samples per call. Defaults to 64. This must match the batch_size parameter that was used when creating the recurrent network that will process the data. rng : :class:`numpy.random.RandomState` or int, optional A random number generator, or an integer seed for a random number generator. If not provided, the random number generator will be created with an automatically chosen seed. Returns ------- callable : A callable that can be used inside a dataset for training a recurrent network. ''' assert batch_size >= 2, 'batch_size must be at least 2!' assert isinstance(arrays, (tuple, list)), 'arrays must be a tuple or list!' if rng is None or isinstance(rng, int): rng = np.random.RandomState(rng) def sample(): xs = [np.zeros((batch_size, steps, a.shape[1]), a.dtype) for a in arrays] for i in range(batch_size): j = rng.randint(len(arrays[0]) - steps) for x, a in zip(xs, arrays): x[i] = a[j:j+steps] return xs return sample
python
{ "resource": "" }
q275294
Text.encode
test
def encode(self, txt): '''Encode a text string by replacing characters with alphabet index. Parameters ---------- txt : str A string to encode. Returns ------- classes : list of int A sequence of alphabet index values corresponding to the given text. ''' return list(self._fwd_index.get(c, 0) for c in txt)
python
{ "resource": "" }
q275295
Text.classifier_batches
test
def classifier_batches(self, steps, batch_size, rng=None): '''Create a callable that returns a batch of training data. Parameters ---------- steps : int Number of time steps in each batch. batch_size : int Number of training examples per batch. rng : :class:`numpy.random.RandomState` or int, optional A random number generator, or an integer seed for a random number generator. If not provided, the random number generator will be created with an automatically chosen seed. Returns ------- batch : callable A callable that, when called, returns a batch of data that can be used to train a classifier model. ''' assert batch_size >= 2, 'batch_size must be at least 2!' if rng is None or isinstance(rng, int): rng = np.random.RandomState(rng) T = np.arange(steps) def batch(): inputs = np.zeros((batch_size, steps, 1 + len(self.alpha)), 'f') outputs = np.zeros((batch_size, steps), 'i') for b in range(batch_size): offset = rng.randint(len(self.text) - steps - 1) enc = self.encode(self.text[offset:offset + steps + 1]) inputs[b, T, enc[:-1]] = 1 outputs[b, T] = enc[1:] return [inputs, outputs] return batch
python
{ "resource": "" }
q275296
Classifier.predict_sequence
test
def predict_sequence(self, labels, steps, streams=1, rng=None): '''Draw a sequential sample of class labels from this network. Parameters ---------- labels : list of int A list of integer class labels to get the classifier started. steps : int The number of time steps to sample. streams : int, optional Number of parallel streams to sample from the model. Defaults to 1. rng : :class:`numpy.random.RandomState` or int, optional A random number generator, or an integer seed for a random number generator. If not provided, the random number generator will be created with an automatically chosen seed. Yields ------ label(s) : int or list of int Yields at each time step an integer class label sampled sequentially from the model. If the number of requested streams is greater than 1, this will be a list containing the corresponding number of class labels. ''' if rng is None or isinstance(rng, int): rng = np.random.RandomState(rng) offset = len(labels) batch = max(2, streams) inputs = np.zeros((batch, offset + steps, self.layers[0].output_size), 'f') inputs[:, np.arange(offset), labels] = 1 for i in range(offset, offset + steps): chars = [] for pdf in self.predict_proba(inputs[:i])[:, -1]: try: c = rng.multinomial(1, pdf).argmax(axis=-1) except ValueError: # sometimes the pdf triggers a normalization error. just # choose greedily in this case. c = pdf.argmax(axis=-1) chars.append(int(c)) inputs[np.arange(batch), i, chars] = 1 yield chars[0] if streams == 1 else chars
python
{ "resource": "" }
q275297
Convolution.add_conv_weights
test
def add_conv_weights(self, name, mean=0, std=None, sparsity=0): '''Add a convolutional weight array to this layer's parameters. Parameters ---------- name : str Name of the parameter to add. mean : float, optional Mean value for randomly-initialized weights. Defaults to 0. std : float, optional Standard deviation of initial matrix values. Defaults to :math:`1 / sqrt(n_i + n_o)`. sparsity : float, optional Fraction of weights to set to zero. Defaults to 0. ''' nin = self.input_size nout = self.output_size mean = self.kwargs.get( 'mean_{}'.format(name), self.kwargs.get('mean', mean)) std = self.kwargs.get( 'std_{}'.format(name), self.kwargs.get('std', std or 1 / np.sqrt(nin + nout))) sparsity = self.kwargs.get( 'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity)) arr = np.zeros((nout, nin) + self.filter_size, util.FLOAT) for r in range(self.filter_size[0]): for c in range(self.filter_size[1]): arr[:, :, r, c] = util.random_matrix( nout, nin, mean, std, sparsity=sparsity, rng=self.rng) self._params.append(theano.shared(arr, name=self._fmt(name)))
python
{ "resource": "" }
q275298
Autoencoder.encode
test
def encode(self, x, layer=None, sample=False, **kwargs): '''Encode a dataset using the hidden layer activations of our network. Parameters ---------- x : ndarray A dataset to encode. Rows of this dataset capture individual data points, while columns represent the variables in each data point. layer : str, optional The name of the hidden layer output to use. By default, we use the "middle" hidden layer---for example, for a 4,2,4 or 4,3,2,3,4 autoencoder, we use the layer with size 2. sample : bool, optional If True, then draw a sample using the hidden activations as independent Bernoulli probabilities for the encoded data. This assumes the hidden layer has a logistic sigmoid activation function. Returns ------- ndarray : The given dataset, encoded by the appropriate hidden layer activation. ''' enc = self.feed_forward(x, **kwargs)[self._find_output(layer)] if sample: return np.random.binomial(n=1, p=enc).astype(np.uint8) return enc
python
{ "resource": "" }
q275299
Autoencoder.decode
test
def decode(self, z, layer=None, **kwargs): '''Decode an encoded dataset by computing the output layer activation. Parameters ---------- z : ndarray A matrix containing encoded data from this autoencoder. layer : int or str or :class:`Layer <layers.Layer>`, optional The index or name of the hidden layer that was used to encode `z`. Returns ------- decoded : ndarray The decoded dataset. ''' key = self._find_output(layer) if key not in self._functions: regs = regularizers.from_kwargs(self, **kwargs) outputs, updates = self.build_graph(regs) self._functions[key] = theano.function( [outputs[key]], [outputs[self.layers[-1].output_name]], updates=updates) return self._functions[key](z)[0]
python
{ "resource": "" }