{"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nparse an RFC 1123 or asctime - like format date string to produce a Python datetime object.", "response": "def parse_date(value):\n \"\"\"Parse an RFC 1123 or asctime-like format date string to produce\n a Python datetime object (without a timezone).\n \"\"\"\n # Do the regex magic; also enforces 2 or 4 digit years\n match = Definitions.DATE_RE.match(value) if value else None\n if not match:\n return None\n # We're going to extract and prepare captured data in 'data'.\n data = {}\n captured = match.groupdict()\n fields = ['year', 'month', 'day', 'hour', 'minute', 'second']\n # If we matched on the RFC 1123 family format\n if captured['year']:\n for field in fields:\n data[field] = captured[field]\n # If we matched on the asctime format, use year2 etc.\n else:\n for field in fields:\n data[field] = captured[field + \"2\"]\n year = data['year']\n # Interpret lame 2-digit years - base the cutoff on UNIX epoch, in case\n # someone sets a '70' cookie meaning 'distant past'. 
This won't break for\n # 58 years and people who use 2-digit years are asking for it anyway.\n if len(year) == 2:\n if int(year) < 70:\n year = \"20\" + year\n else:\n year = \"19\" + year\n year = int(year)\n # Clamp to [1900, 9999]: strftime has min 1900, datetime has max 9999\n data['year'] = max(1900, min(year, 9999))\n # Other things which are numbers should convert to integer\n for field in ['day', 'hour', 'minute', 'second']:\n if data[field] is None:\n data[field] = 0\n data[field] = int(data[field])\n # Look up the number datetime needs for the named month\n data['month'] = Definitions.month_numbers[data['month'].lower()]\n return datetime.datetime(**data)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef parse_value(value, allow_spaces=True, unquote=default_unquote):\n \"Process a cookie value\"\n if value is None:\n return None\n value = strip_spaces_and_quotes(value)\n value = parse_string(value, unquote=unquote)\n if not allow_spaces:\n assert ' ' not in value\n return value", "response": "Process a cookie value"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef valid_name(name):\n \"Validate a cookie name string\"\n if isinstance(name, bytes):\n name = name.decode('ascii')\n if not Definitions.COOKIE_NAME_RE.match(name):\n return False\n # This module doesn't support $identifiers, which are part of an obsolete\n # and highly complex standard which is never used.\n if name[0] == \"$\":\n return False\n return True", "response": "Validate a cookie name string"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef valid_value(value, quote=default_cookie_quote, unquote=default_unquote):\n if value is None:\n return False\n\n # Put the value through a round trip with the given quote and unquote\n # functions, so we will know whether data will get lost or not in the event\n # that we don't complain.\n 
encoded = encode_cookie_value(value, quote=quote)\n decoded = parse_string(encoded, unquote=unquote)\n\n # If the original string made the round trip, this is a valid value for the\n # given quote and unquote functions. Since the round trip can generate\n # different unicode forms, normalize before comparing, so we can ignore\n # trivial inequalities.\n decoded_normalized = (normalize(\"NFKD\", decoded)\n if not isinstance(decoded, bytes) else decoded)\n value_normalized = (normalize(\"NFKD\", value)\n if not isinstance(value, bytes) else value)\n if decoded_normalized == value_normalized:\n return True\n return False", "response": "Validate a cookie value string."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef valid_date(date):\n \"Validate an expires datetime object\"\n # We want something that acts like a datetime. In particular,\n # strings indicate a failure to parse down to an object and ints are\n # nonstandard and ambiguous at best.\n if not hasattr(date, 'tzinfo'):\n return False\n # Relevant RFCs define UTC as 'close enough' to GMT, and the maximum\n # difference between UTC and GMT is often stated to be less than a second.\n if date.tzinfo is None or _total_seconds(date.utcoffset()) < 1.1:\n return True\n return False", "response": "Validate an expires datetime object"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nvalidate a cookie domain ASCII string", "response": "def valid_domain(domain):\n \"Validate a cookie domain ASCII string\"\n # Using encoding on domain would confuse browsers into not sending cookies.\n # Generate UnicodeDecodeError up front if it can't store as ASCII.\n domain.encode('ascii')\n # Domains starting with periods are not RFC-valid, but this is very common\n # in existing cookies, so they should still parse with DOMAIN_AV.\n if Definitions.DOMAIN_RE.match(domain):\n return True\n return False"} {"SOURCE": "codesearchnet", "instruction": "Can 
you generate the documentation for the following Python 3 function\ndef valid_path(value):\n \"Validate a cookie path ASCII string\"\n # Generate UnicodeDecodeError if path can't store as ASCII.\n value.encode(\"ascii\")\n # Cookies without leading slash will likely be ignored, raise ASAP.\n if not (value and value[0] == \"/\"):\n return False\n if not Definitions.PATH_RE.match(value):\n return False\n return True", "response": "Validate a cookie path ASCII string"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nvalidating a cookie Max - Age", "response": "def valid_max_age(number):\n \"Validate a cookie Max-Age\"\n if isinstance(number, basestring):\n try:\n number = long(number)\n except (ValueError, TypeError):\n return False\n if number >= 0 and number % 1 == 0:\n return True\n return False"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef render_date(date):\n if not date:\n return None\n assert valid_date(date)\n # Avoid %a and %b, which can change with locale, breaking compliance\n weekday = Definitions.weekday_abbr_list[date.weekday()]\n month = Definitions.month_abbr_list[date.month - 1]\n return date.strftime(\"{day}, %d {month} %Y %H:%M:%S GMT\"\n ).format(day=weekday, month=month)", "response": "Render a date in a ISO - 8601 format."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _parse_request(header_data, ignore_bad_cookies=False):\n cookies_dict = {}\n for line in Definitions.EOL.split(header_data.strip()):\n matches = Definitions.COOKIE_RE.finditer(line)\n matches = [item for item in matches]\n for match in matches:\n invalid = match.group('invalid')\n if invalid:\n if not ignore_bad_cookies:\n raise InvalidCookieError(data=invalid)\n _report_invalid_cookie(invalid)\n continue\n name = match.group('name')\n values = cookies_dict.get(name)\n value = match.group('value').strip('\"')\n if values:\n 
values.append(value)\n else:\n cookies_dict[name] = [value]\n if not matches:\n if not ignore_bad_cookies:\n raise InvalidCookieError(data=line)\n _report_invalid_cookie(line)\n return cookies_dict", "response": "Turn one or more lines of Cookie : header data into a dict mapping cookie names to cookie values."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nturn one Set - Cookie line into a dict mapping attribute names to attribute values.", "response": "def parse_one_response(line, ignore_bad_cookies=False,\n ignore_bad_attributes=True):\n \"\"\"Turn one 'Set-Cookie:' line into a dict mapping attribute names to\n attribute values (raw strings).\n \"\"\"\n cookie_dict = {}\n # Basic validation, extract name/value/attrs-chunk\n match = Definitions.SET_COOKIE_HEADER_RE.match(line)\n if not match:\n if not ignore_bad_cookies:\n raise InvalidCookieError(data=line)\n _report_invalid_cookie(line)\n return None\n cookie_dict.update({\n 'name': match.group('name'),\n 'value': match.group('value')})\n # Extract individual attrs from the attrs chunk\n for match in Definitions.ATTR_RE.finditer(match.group('attrs')):\n captured = dict((k, v) for (k, v) in match.groupdict().items() if v)\n unrecognized = captured.get('unrecognized', None)\n if unrecognized:\n if not ignore_bad_attributes:\n raise InvalidCookieAttributeError(None, unrecognized,\n \"unrecognized\")\n _report_unknown_attribute(unrecognized)\n continue\n # for unary flags\n for key in ('secure', 'httponly'):\n if captured.get(key):\n captured[key] = True\n # ignore subcomponents of expires - they're still there to avoid doing\n # two passes\n timekeys = ('weekday', 'month', 'day', 'hour', 'minute', 'second',\n 'year')\n if 'year' in captured:\n for key in timekeys:\n del captured[key]\n elif 'year2' in captured:\n for key in timekeys:\n del captured[key + \"2\"]\n cookie_dict.update(captured)\n return cookie_dict"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script 
to\nturn one or more lines of Set - Cookie header data into a list of dicts mapping attribute names to attribute values.", "response": "def _parse_response(header_data, ignore_bad_cookies=False,\n ignore_bad_attributes=True):\n \"\"\"Turn one or more lines of 'Set-Cookie:' header data into a list of dicts\n mapping attribute names to attribute values (as plain strings).\n \"\"\"\n cookie_dicts = []\n for line in Definitions.EOL.split(header_data.strip()):\n if not line:\n break\n cookie_dict = parse_one_response(\n line, ignore_bad_cookies=ignore_bad_cookies,\n ignore_bad_attributes=ignore_bad_attributes)\n if not cookie_dict:\n continue\n cookie_dicts.append(cookie_dict)\n if not cookie_dicts:\n if not ignore_bad_cookies:\n raise InvalidCookieError(data=header_data)\n _report_invalid_cookie(header_data)\n return cookie_dicts"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef from_dict(cls, cookie_dict, ignore_bad_attributes=True):\n name = cookie_dict.get('name', None)\n if not name:\n raise InvalidCookieError(\"Cookie must have name\")\n raw_value = cookie_dict.get('value', '')\n # Absence or failure of parser here is fatal; errors in present name\n # and value should be found by Cookie.__init__.\n value = cls.attribute_parsers['value'](raw_value)\n cookie = cls(name, value)\n\n # Parse values from serialized formats into objects\n parsed = {}\n for key, value in cookie_dict.items():\n # Don't want to pass name/value to _set_attributes\n if key in ('name', 'value'):\n continue\n parser = cls.attribute_parsers.get(key)\n if not parser:\n # Don't let totally unknown attributes pass silently\n if not ignore_bad_attributes:\n raise InvalidCookieAttributeError(\n key, value, \"unknown cookie attribute '%s'\" % key)\n _report_unknown_attribute(key)\n continue\n try:\n parsed_value = parser(value)\n except Exception as e:\n reason = \"did not parse with %r: %r\" % (parser, e)\n if not 
ignore_bad_attributes:\n raise InvalidCookieAttributeError(\n key, value, reason)\n _report_invalid_attribute(key, value, reason)\n parsed_value = ''\n parsed[key] = parsed_value\n\n # Set the parsed objects (does object validation automatically)\n cookie._set_attributes(parsed, ignore_bad_attributes)\n return cookie", "response": "Construct a Cookie instance from a dictionary of strings to parse."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nconstructing a Cookie object from a line of Set - Cookie header data.", "response": "def from_string(cls, line, ignore_bad_cookies=False,\n ignore_bad_attributes=True):\n \"Construct a Cookie object from a line of Set-Cookie header data.\"\n cookie_dict = parse_one_response(\n line, ignore_bad_cookies=ignore_bad_cookies,\n ignore_bad_attributes=ignore_bad_attributes)\n if not cookie_dict:\n return None\n return cls.from_dict(\n cookie_dict, ignore_bad_attributes=ignore_bad_attributes)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nvalidates a cookie attribute with an appropriate validator.", "response": "def validate(self, name, value):\n \"\"\"Validate a cookie attribute with an appropriate validator.\n\n The value comes in already parsed (for example, an expires value\n should be a datetime). 
Called automatically when an attribute\n value is set.\n \"\"\"\n validator = self.attribute_validators.get(name, None)\n if validator:\n return True if validator(value) else False\n return True"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nexport this cookie s attributes as a dict of encoded values.", "response": "def attributes(self):\n \"\"\"Export this cookie's attributes as a dict of encoded values.\n\n This is an important part of the code for rendering attributes, e.g.\n render_response().\n \"\"\"\n dictionary = {}\n # Only look for attributes registered in attribute_names.\n for python_attr_name, cookie_attr_name in self.attribute_names.items():\n value = getattr(self, python_attr_name)\n renderer = self.attribute_renderers.get(python_attr_name, None)\n if renderer:\n value = renderer(value)\n # If renderer returns None, or it's just natively none, then the\n # value is suppressed entirely - does not appear in any rendering.\n if not value:\n continue\n dictionary[cookie_attr_name] = value\n return dictionary"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nrender as a string formatted for HTTP request headers", "response": "def render_request(self):\n \"\"\"Render as a string formatted for HTTP request headers\n (simple 'Cookie: ' style).\n \"\"\"\n # Use whatever renderers are defined for name and value.\n name, value = self.name, self.value\n renderer = self.attribute_renderers.get('name', None)\n if renderer:\n name = renderer(name)\n renderer = self.attribute_renderers.get('value', None)\n if renderer:\n value = renderer(value)\n return ''.join((name, \"=\", value))"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef render_response(self):\n # Use whatever renderers are defined for name and value.\n # (.attributes() is responsible for all other rendering.)\n name, value = self.name, self.value\n renderer = 
self.attribute_renderers.get('name', None)\n if renderer:\n name = renderer(name)\n renderer = self.attribute_renderers.get('value', None)\n if renderer:\n value = renderer(value)\n return '; '.join(\n ['{0}={1}'.format(name, value)] +\n [key if isinstance(val, bool) else '='.join((key, val))\n for key, val in self.attributes().items()]\n )", "response": "Render as a string formatted for HTTP response headers\n ."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nadd Cookie objects by their names or create new ones under the key.", "response": "def add(self, *args, **kwargs):\n \"\"\"Add Cookie objects by their names, or create new ones under\n specified names.\n\n Any unnamed arguments are interpreted as existing cookies, and\n are added under the value in their .name attribute. With keyword\n arguments, the key is interpreted as the cookie name and the\n value as the UNENCODED value stored in the cookie.\n \"\"\"\n # Only the first one is accessible through the main interface,\n # others accessible through get_all (all_cookies).\n for cookie in args:\n self.all_cookies.append(cookie)\n if cookie.name in self:\n continue\n self[cookie.name] = cookie\n for key, value in kwargs.items():\n cookie = self.cookie_class(key, value)\n self.all_cookies.append(cookie)\n if key in self:\n continue\n self[key] = cookie"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nparsing the request header data into Cookie objects and add them to the Cookies object.", "response": "def parse_request(self, header_data, ignore_bad_cookies=False):\n \"\"\"Parse 'Cookie' header data into Cookie objects, and add them to\n this Cookies object.\n\n :arg header_data: string containing only 'Cookie:' request headers or\n header values (as in CGI/WSGI HTTP_COOKIE); if more than one, they must\n be separated by CRLF (\\\\r\\\\n).\n\n :arg ignore_bad_cookies: if set, will log each syntactically invalid\n cookie (at the granularity of 
semicolon-delimited blocks) rather than\n raising an exception at the first bad cookie.\n\n :returns: a Cookies instance containing Cookie objects parsed from\n header_data.\n\n .. note::\n If you want to parse 'Set-Cookie:' response headers, please use\n parse_response instead. parse_request will happily turn 'expires=frob'\n into a separate cookie without complaining, according to the grammar.\n \"\"\"\n cookies_dict = _parse_request(\n header_data, ignore_bad_cookies=ignore_bad_cookies)\n cookie_objects = []\n for name, values in cookies_dict.items():\n for value in values:\n # Use from_dict to check name and parse value\n cookie_dict = {'name': name, 'value': value}\n try:\n cookie = self.cookie_class.from_dict(cookie_dict)\n except InvalidCookieError:\n if not ignore_bad_cookies:\n raise\n else:\n cookie_objects.append(cookie)\n try:\n self.add(*cookie_objects)\n except InvalidCookieError:\n if not ignore_bad_cookies:\n raise\n _report_invalid_cookie(header_data)\n return self"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nparse the Set - Cookie header data into Cookie objects and add them to the Cookies object.", "response": "def parse_response(self, header_data, ignore_bad_cookies=False,\n ignore_bad_attributes=True):\n \"\"\"Parse 'Set-Cookie' header data into Cookie objects, and add them to\n this Cookies object.\n\n :arg header_data: string containing only 'Set-Cookie:' request headers\n or their corresponding header values; if more than one, they must be\n separated by CRLF (\\\\r\\\\n).\n\n :arg ignore_bad_cookies: if set, will log each syntactically invalid\n cookie rather than raising an exception at the first bad cookie. (This\n includes cookies which have noncompliant characters in the attribute\n section).\n\n :arg ignore_bad_attributes: defaults to True, which means to log but\n not raise an error when a particular attribute is unrecognized. 
(This\n does not necessarily mean that the attribute is invalid, although that\n would often be the case.) if unset, then an error will be raised at the\n first semicolon-delimited block which has an unknown attribute.\n\n :returns: a Cookies instance containing Cookie objects parsed from\n header_data, each with recognized attributes populated.\n\n .. note::\n If you want to parse 'Cookie:' headers (i.e., data like what's sent\n with an HTTP request, which has only name=value pairs and no\n attributes), then please use parse_request instead. Such lines often\n contain multiple name=value pairs, and parse_response will throw away\n the pairs after the first one, which will probably generate errors or\n confusing behavior. (Since there's no perfect way to automatically\n determine which kind of parsing to do, you have to tell it manually by\n choosing correctly from parse_request between part_response.)\n \"\"\"\n cookie_dicts = _parse_response(\n header_data,\n ignore_bad_cookies=ignore_bad_cookies,\n ignore_bad_attributes=ignore_bad_attributes)\n cookie_objects = []\n for cookie_dict in cookie_dicts:\n cookie = self.cookie_class.from_dict(cookie_dict)\n cookie_objects.append(cookie)\n self.add(*cookie_objects)\n return self"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nconstructs a Cookies object from request header data.", "response": "def from_request(cls, header_data, ignore_bad_cookies=False):\n \"Construct a Cookies object from request header data.\"\n cookies = cls()\n cookies.parse_request(\n header_data, ignore_bad_cookies=ignore_bad_cookies)\n return cookies"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef from_response(cls, header_data, ignore_bad_cookies=False,\n ignore_bad_attributes=True):\n \"Construct a Cookies object from response header data.\"\n cookies = cls()\n cookies.parse_response(\n header_data,\n 
ignore_bad_cookies=ignore_bad_cookies,\n ignore_bad_attributes=ignore_bad_attributes)\n return cookies", "response": "Construct a Cookies object from response header data."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nrenders the dict s Cookie objects into a string formatted for HTTP request headers.", "response": "def render_request(self, sort=True):\n \"\"\"Render the dict's Cookie objects into a string formatted for HTTP\n request headers (simple 'Cookie: ' style).\n \"\"\"\n if not sort:\n return (\"; \".join(\n cookie.render_request() for cookie in self.values()))\n return (\"; \".join(sorted(\n cookie.render_request() for cookie in self.values())))"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef render_response(self, sort=True):\n rendered = [cookie.render_response() for cookie in self.values()]\n return rendered if not sort else sorted(rendered)", "response": "Render the dict s Cookie objects into a list of strings formatted for HTTP response headers."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef aorb(a, b):\n return matrix(np.logical_or(a, b).astype('float'), a.size)", "response": "Return a matrix of logic comparison of A or B"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn a matrix of logic comparison of A or B", "response": "def aandb(a, b):\n \"\"\"Return a matrix of logic comparison of A or B\"\"\"\n return matrix(np.logical_and(a, b).astype('float'), a.size)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef not0(a):\n return matrix(list(map(lambda x: 1 if x == 0 else x, a)), a.size)", "response": "Return u if u! 
= 0"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns sorted list of objects in order of name", "response": "def sort(m, reverse=False):\n \"\"\"Return sorted m (default: ascending order)\"\"\"\n ty = type(m)\n if ty == matrix:\n m = list(m)\n m = sorted(m, reverse=reverse)\n if ty == matrix:\n m = matrix(m)\n return m"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef sort_idx(m, reverse=False):\n return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse)", "response": "Return the indices of m in sorted order ( default : ascending order"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the indices of all the val in m", "response": "def index(m, val):\n \"\"\"\n Return the indices of all the ``val`` in ``m``\n \"\"\"\n mm = np.array(m)\n idx_tuple = np.where(mm == val)\n idx = idx_tuple[0].tolist()\n\n return idx"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef to_number(s):\n ret = s\n # try converting to float\n try:\n ret = float(s)\n except ValueError:\n ret = ret.strip('\\'').strip()\n\n # try converting to uid\n try:\n ret = int(s)\n except ValueError:\n pass\n\n # try converting to boolean\n if ret == 'True':\n ret = True\n elif ret == 'False':\n ret = False\n elif ret == 'None':\n ret = None\n return ret", "response": "Convert a string to a number."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsafing division of two sets of resources.", "response": "def sdiv(a, b):\n \"\"\"Safe division: if a == b == 0, sdiv(a, b) == 1\"\"\"\n if len(a) != len(b):\n raise ValueError('Argument a and b does not have the same length')\n idx = 0\n ret = matrix(0, (len(a), 1), 'd')\n for m, n in zip(a, b):\n try:\n ret[idx] = m / n\n except ZeroDivisionError:\n ret[idx] = 1\n finally:\n idx += 1\n return ret"} {"SOURCE": 
"codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_config_load_path(conf_path=None):\n\n if conf_path is None:\n # test ./andes.conf\n if os.path.isfile('andes.conf'):\n conf_path = 'andes.conf'\n # test ~/andes.conf\n home_dir = os.path.expanduser('~')\n if os.path.isfile(os.path.join(home_dir, '.andes', 'andes.conf')):\n conf_path = os.path.join(home_dir, '.andes', 'andes.conf')\n\n if conf_path is not None:\n logger.debug('Found config file at {}.'.format(conf_path))\n\n return conf_path", "response": "Returns the path to the config file that is loaded by the user."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a directory where the log files are stored.", "response": "def get_log_dir():\n \"\"\"\n Get a directory for logging\n\n On Linux or macOS, '/tmp/andes' is the default. On Windows,\n '%APPDATA%/andes' is the default.\n\n Returns\n -------\n str\n Path to the logging directory\n \"\"\"\n PATH = ''\n if platform.system() in ('Linux', 'Darwin'):\n PATH = tempfile.mkdtemp(prefix='andes-')\n\n elif platform.system() == 'Windows':\n APPDATA = os.getenv('APPDATA')\n PATH = os.path.join(APPDATA, 'andes')\n\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n return PATH"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nstores the state and algeb values at time t.", "response": "def store(self, t, step):\n \"\"\"\n Record the state/algeb values at time t to self.vars\n \"\"\"\n max_cache = int(self.system.tds.config.max_cache)\n if len(self.vars) >= max_cache > 0:\n self.dump()\n self.vars = list()\n self.t = list()\n self.k = list()\n logger.debug(\n 'varout cache cleared at simulation t = {:g}.'.format(\n self.system.dae.t))\n self._mode = 'a'\n\n var_data = matrix([self.system.dae.x, self.system.dae.y])\n\n # ===== This code block is deprecated =====\n self.t.append(t)\n self.k.append(step)\n 
self.vars.append(var_data)\n # =========================================\n\n # clear data cache if written to disk\n if self.np_nrows >= max_cache > 0:\n self.dump_np_vars()\n self.np_vars = np.zeros(self._np_block_shape)\n self.np_nrows = 0\n self.np_t = np.zeros((self._np_block_rows,))\n self.np_k = np.zeros((self._np_block_rows,))\n logger.debug(\n 'np_vars cache cleared at simulation t = {:g}.'.format(\n self.system.dae.t))\n self._mode = 'a'\n\n # initialize before first-time adding data\n if self.np_nrows == 0:\n self.np_ncols = len(var_data)\n self._np_block_shape = (self._np_block_rows, self.np_ncols)\n self.np_vars = np.zeros(self._np_block_shape)\n self.np_t = np.zeros((self._np_block_rows,))\n self.np_k = np.zeros((self._np_block_rows,))\n\n # adding data to the matrix\n # self.np_vars[self.np_nrows, 0] = t\n self.np_t[self.np_nrows] = t\n self.np_k[self.np_nrows] = step\n self.np_vars[self.np_nrows, :] = np.array(var_data).reshape((-1))\n self.np_nrows += 1\n\n # check if matrix extension is needed\n if self.np_nrows >= self.np_vars.shape[0]:\n self.np_vars = np.concatenate([self.np_vars, np.zeros(self._np_block_shape)], axis=0)\n self.np_t = np.concatenate([self.np_t, np.zeros((self._np_block_rows,))], axis=0)\n self.np_k = np.concatenate([self.np_k, np.zeros((self._np_block_rows,))], axis=0)\n\n # remove the post-computed variables from the variable list\n if self.system.tds.config.compute_flows:\n self.system.dae.y = self.system.dae.y[:self.system.dae.m]"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the full result matrix of the object.", "response": "def show(self):\n \"\"\"\n The representation of an Varout object\n\n :return: the full result matrix (for use with PyCharm viewer)\n :rtype: np.array\n \"\"\"\n out = []\n\n for item in self.vars:\n out.append(list(item))\n\n return np.array(out)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef 
concat_t_vars(self):\n logger.warning('This function is deprecated and replaced by `concat_t_vars_np`.')\n out = np.array([])\n\n if len(self.t) == 0:\n return out\n\n out = np.ndarray(shape=(0, self.vars[0].size[0] + 1))\n\n for t, var in zip(self.t, self.vars):\n line = [[t]]\n line[0].extend(list(var))\n out = np.append(out, line, axis=0)\n\n return out", "response": "Concatenate self. t with self. vars and output a single matrix containing the 0 - th column of the data dump\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nconcatenate self. np_t with self. np_vars and return a single matrix.", "response": "def concat_t_vars_np(self, vars_idx=None):\n \"\"\"\n Concatenate `self.np_t` with `self.np_vars` and return a single matrix.\n The first column corresponds to time, and the rest of the matrix is the variables.\n\n Returns\n -------\n np.array : concatenated matrix\n\n \"\"\"\n selected_np_vars = self.np_vars\n if vars_idx is not None:\n selected_np_vars = self.np_vars[:, vars_idx]\n\n return np.concatenate([self.np_t[:self.np_nrows].reshape((-1, 1)),\n selected_np_vars[:self.np_nrows, :]], axis=1)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_xy(self, yidx, xidx=0):\n assert isinstance(xidx, int)\n if isinstance(yidx, int):\n yidx = [yidx]\n\n t_vars = self.concat_t_vars()\n\n xdata = t_vars[:, xidx]\n ydata = t_vars[:, yidx]\n\n return xdata.tolist(), ydata.transpose().tolist()", "response": "Return the x and y data for the given indices for plot\n\n "} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef dump_np_vars(self, store_format='csv', delimiter=','):\n\n ret = False\n\n if self.system.files.no_output is True:\n logger.debug('no_output is True, thus no TDS dump saved ')\n return True\n\n if self.write_lst() and self.write_np_dat(store_format=store_format, delimiter=delimiter):\n ret = True\n\n return ret", 
"response": "Dump the TDS simulation data to files by calling subroutines write_lst and write_np_dat."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndump the TDS results to the output lst file", "response": "def dump(self):\n \"\"\"\n Dump the TDS results to the output `dat` file\n\n :return: succeed flag\n \"\"\"\n logger.warn('This function is deprecated and replaced by `dump_np_vars`.')\n\n ret = False\n\n if self.system.files.no_output:\n # return ``True`` because it did not fail\n return True\n\n if self.write_lst() and self.write_dat():\n ret = True\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef write_np_dat(self, store_format='csv', delimiter=',', fmt='%.12g'):\n ret = False\n system = self.system\n\n # compute the total number of columns, excluding time\n if not system.Recorder.n:\n n_vars = system.dae.m + system.dae.n\n # post-computed power flows include:\n # bus - (Pi, Qi)\n # line - (Pij, Pji, Qij, Qji, Iij_Real, Iij_Imag, Iji_real, Iji_Imag)\n if system.tds.config.compute_flows:\n n_vars += 2 * system.Bus.n + 8 * system.Line.n + 2 * system.Area.n_combination\n idx = list(range(n_vars))\n\n else:\n n_vars = len(system.Recorder.varout_idx)\n idx = system.Recorder.varout_idx\n\n # prepare data\n t_vars_concatenated = self.concat_t_vars_np(vars_idx=idx)\n\n try:\n os.makedirs(os.path.abspath(os.path.dirname(system.files.dat)), exist_ok=True)\n with open(system.files.dat, self._mode) as f:\n if store_format in ('csv', 'txt'):\n np.savetxt(f, t_vars_concatenated, fmt=fmt, delimiter=delimiter)\n elif store_format == 'hdf5':\n pass\n ret = True\n logger.info('TDS data dumped to <{}>'.format(system.files.dat))\n\n except IOError:\n logger.error('I/O Error while writing the dat file.')\n\n return ret", "response": "Writes TDS data stored in self. 
np_vars to the output file."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nwriting the data of the current variable set to a. dat file.", "response": "def write_dat(self):\n \"\"\"\n Write ``system.Varout.vars`` to a ``.dat`` file\n :return:\n \"\"\"\n logger.warn('This function is deprecated and replaced by `write_np_dat`.')\n\n ret = False\n system = self.system\n\n # compute the total number of columns, excluding time\n if not system.Recorder.n:\n n_vars = system.dae.m + system.dae.n\n # post-computed power flows include:\n # bus - (Pi, Qi)\n # line - (Pij, Pji, Qij, Qji, Iij_Real, Iij_Imag, Iji_real, Iji_Imag)\n if system.tds.config.compute_flows:\n n_vars += 2 * system.Bus.n + 8 * system.Line.n + 2 * system.Area.n_combination\n idx = list(range(n_vars))\n\n else:\n n_vars = len(system.Recorder.varout_idx)\n idx = system.Recorder.varout_idx\n\n template = ['{:<8g}'] + ['{:0.10f}'] * n_vars\n template = ' '.join(template)\n\n # format the output in a string\n out = ''\n for t, line in zip(self.t, self.vars):\n values = [t] + list(line[idx])\n out += template.format(*values) + '\\n'\n\n try:\n os.makedirs(os.path.abspath(os.path.dirname(system.files.dat)), exist_ok=True)\n with open(system.files.dat, self._mode) as f:\n f.write(out)\n ret = True\n\n except IOError:\n logger.error('I/O Error while writing the dat file.')\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ndump the variable name lst file and return True if it succeed False if it fails", "response": "def write_lst(self):\n \"\"\"\n Dump the variable name lst file\n\n :return: succeed flag\n \"\"\"\n\n ret = False\n out = ''\n system = self.system\n dae = self.system.dae\n varname = self.system.varname\n template = '{:>6g}, {:>25s}, {:>35s}\\n'\n\n # header line\n out += template.format(0, 'Time [s]', '$Time\\\\ [s]$')\n\n # include line flow variables in algebraic variables\n nflows = 0\n if 
self.system.tds.config.compute_flows:\n nflows = 2 * self.system.Bus.n + \\\n 8 * self.system.Line.n + \\\n 2 * self.system.Area.n_combination\n\n # output variable indices\n if system.Recorder.n == 0:\n state_idx = list(range(dae.n))\n algeb_idx = list(range(dae.n, dae.n + dae.m + nflows))\n idx = state_idx + algeb_idx\n else:\n idx = system.Recorder.varout_idx\n\n # variable names concatenated\n uname = varname.unamex + varname.unamey\n fname = varname.fnamex + varname.fnamey\n\n for e, i in enumerate(idx):\n out += template.format(e + 1, uname[i], fname[i])\n\n try:\n with open(self.system.files.lst, 'w') as f:\n f.write(out)\n ret = True\n except IOError:\n logger.error('I/O Error while writing the lst file.')\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nconvert self. vars to a numpy array", "response": "def vars_to_array(self):\n \"\"\"\n Convert `self.vars` to a numpy array\n\n Returns\n -------\n numpy.array\n \"\"\"\n\n logger.warn('This function is deprecated. You can inspect `self.np_vars` directly as NumPy arrays '\n 'without conversion.')\n if not self.vars:\n return None\n\n vars_matrix = matrix(self.vars, size=(self.vars[0].size[0],\n len(self.vars))).trans()\n\n self.vars_array = np.array(vars_matrix)\n\n return self.vars_array"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nconfigure a logger for the andes package.", "response": "def config_logger(name='andes',\n log_file='andes.log',\n log_path='',\n stream=True,\n stream_level=logging.INFO\n ):\n \"\"\"\n Configure a logger for the andes package with options for a `FileHandler`\n and a `StreamHandler`. This function is called at the beginning of\n ``andes.main.main()``.\n\n Parameters\n ----------\n name : str, optional\n Base logger name, ``'andes'`` by default. 
Changing this\n parameter will affect the loggers in modules and\n cause unexpected behaviours.\n log_file : str, optional\n Logg file name for `FileHandler`, ``'andes.log'`` by default.\n If ``None``, the `FileHandler` will not be created.\n log_path : str, optional\n Path to store the log file. By default, the path is generated by\n get_log_dir() in utils.misc.\n stream : bool, optional\n Create a `StreamHandler` for `stdout` if ``True``.\n If ``False``, the handler will not be created.\n stream_level : {10, 20, 30, 40, 50}, optional\n `StreamHandler` verbosity level.\n\n Returns\n -------\n None\n\n \"\"\"\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # file handler for level DEBUG and up\n if log_file is not None:\n log_full_path = os.path.join(log_path, log_file)\n fh_formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh = logging.FileHandler(log_full_path)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(fh_formatter)\n logger.addHandler(fh)\n logger.debug('Writing log to {}'.format(log_full_path))\n\n if stream is True:\n sh_formatter = logging.Formatter('%(message)s')\n sh = logging.StreamHandler()\n\n sh.setFormatter(sh_formatter)\n sh.setLevel(stream_level)\n logger.addHandler(sh)\n\n globals()['logger'] = logger"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nlog the Andes command - line preamble at the logging. INFO level Returns ------- None", "response": "def preamble():\n \"\"\"\n Log the Andes command-line preamble at the `logging.INFO` level\n\n Returns\n -------\n None\n \"\"\"\n from . 
import __version__ as version\n logger.info('ANDES {ver} (Build {b}, Python {p} on {os})'\n .format(ver=version[:5], b=version[-8:],\n p=platform.python_version(),\n os=platform.system()))\n try:\n username = os.getlogin() + ', '\n except OSError:\n username = ''\n\n logger.info('Session: {}{}'.format(username,\n strftime(\"%m/%d/%Y %I:%M:%S %p\")))\n logger.info('')"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nconstruct a CLI argument parser and return the parsed command - line arguments.", "response": "def cli_new():\n \"\"\"\n Construct a CLI argument parser and return the parsed arguments.\n\n Returns\n -------\n Namespace\n A namespace object containing the parsed command-line arguments\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('filename', help='Case file name', nargs='*')\n\n # general options\n general_group = parser.add_argument_group('General options')\n general_group.add_argument('-r', '--routine', choices=routines.__cli__,\n help='Routine to run', nargs='*',\n default=['pflow'], )\n general_group.add_argument('--edit-config', help='Quick edit of the config file',\n default='', nargs='?', type=str)\n general_group.add_argument('--license', action='store_true', help='Display software license')\n\n # I/O\n io_group = parser.add_argument_group('I/O options', 'Optional arguments for managing I/Os')\n io_group.add_argument('-p', '--path', help='Path to case files', type=str, default='', dest='input_path')\n io_group.add_argument('-a', '--addfile', help='Additional files used by some formats.')\n io_group.add_argument('-D', '--dynfile', help='Additional dynamic file in dm format.')\n io_group.add_argument('-P', '--pert', help='Perturbation file path', default='')\n io_group.add_argument('-d', '--dump-raw', help='Dump RAW format case file.')\n io_group.add_argument('-n', '--no-output', help='Force no output of any '\n 'kind',\n action='store_true')\n io_group.add_argument('-o', '--output_path', help='Output 
path prefix', type=str, default='')\n io_group.add_argument('-C', '--clean', help='Clean output files', action='store_true')\n\n config_exclusive = parser.add_mutually_exclusive_group()\n config_exclusive.add_argument('--load-config', help='path to the rc config to load',\n dest='config')\n config_exclusive.add_argument('--save-config', help='save configuration to file name',\n nargs='?', type=str, default='')\n\n # helps and documentations\n group_help = parser.add_argument_group('Help and documentation',\n 'Optional arguments for usage, model and config documentation')\n group_help.add_argument(\n '-g', '--group', help='Show the models in the group.')\n group_help.add_argument(\n '-q', '--quick-help', help='Show a quick help of model format.')\n group_help.add_argument(\n '-c',\n '--category',\n help='Show model names in the given category.')\n group_help.add_argument(\n '-l',\n '--model-list',\n help='Show a full list of all models.',\n action='store_true')\n group_help.add_argument(\n '-f',\n '--model-format',\n help='Show the format definition of models.', nargs='*', default=[])\n group_help.add_argument(\n '-Q',\n '--model-var',\n help='Show the definition of variables .')\n group_help.add_argument(\n '--config-option', help='Show a quick help of a config option ')\n group_help.add_argument(\n '--help-config',\n help='Show help of the class. 
Use ALL for all configs.')\n group_help.add_argument(\n '-s',\n '--search',\n help='Search for models that match the pattern.')\n\n # simulation control\n sim_options = parser.add_argument_group('Simulation control options',\n 'Overwrites the simulation configs')\n sim_options.add_argument(\n '--dime', help='Specify DiME streaming server address and port')\n sim_options.add_argument(\n '--tf', help='End time of time-domain simulation', type=float)\n\n # developer options\n dev_group = parser.add_argument_group('Developer options', 'Options for developer debugging')\n dev_group.add_argument(\n '-v',\n '--verbose',\n help='Program logging level.'\n 'Available levels are 10-DEBUG, 20-INFO, 30-WARNING, '\n '40-ERROR or 50-CRITICAL. The default level is 20-INFO',\n type=int, default=20, choices=(10, 20, 30, 40, 50))\n dev_group.add_argument(\n '--profile', action='store_true', help='Enable Python cProfiler')\n dev_group.add_argument(\n '--ncpu', help='Number of parallel processes', type=int, default=0)\n dev_group.add_argument(\n '--show-data', type=str, help='Show model data converted to system base', nargs='*'\n )\n dev_group.add_argument(\n '-x', '--exit', help='Exit before running routine', action='store_true'\n )\n\n args = parser.parse_args()\n\n return args"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nprinting the requested help and documentation to stdout.", "response": "def andeshelp(group=None,\n category=None,\n model_list=None,\n model_format=None,\n model_var=None,\n quick_help=None,\n help_option=None,\n help_config=None,\n export='plain',\n **kwargs):\n \"\"\"\n Print the requested help and documentation to stdout.\n\n Parameters\n ----------\n group : None or str\n Name of a group whose model names will be printed\n\n category : None or str\n Name of a category whose models will be printed\n\n model_list : bool\n If ``True``, print the full model list.\n\n model_format : None or str\n Names of models whose parameter 
definitions will be printed. Model\n names are separated by comma without space.\n\n model_var : None or str\n A pair of model name and parameter name separated by dot. For\n example, ``Bus.voltage`` stands for the ``voltage`` parameter of\n ``Bus``.\n\n quick_help : None or str\n Name of a model to print a quick help of parameter definitions.\n\n help_option : None or str\n A pair of config name and option name separated by dot. For example,\n ``System.sparselib`` stands for the ``sparselib`` option of\n the ``System`` config.\n\n help_config : None or str\n Config names whose option definitions will be printed. Configs are\n separated by comma without space. For example, ``System,Pflow``. In\n the naming convention, the first letter is captialized.\n\n export : None or {'plain', 'latex'}\n Formatting style available in plain text or LaTex format. This option\n has not been implemented.\n\n kwargs : None or dict\n Other keyword arguments\n\n Returns\n -------\n bool\n True if any help function is executed.\n\n Notes\n -----\n The outputs can be written to a text file using shell command,\n for example, ::\n\n andes -q Bus > bus_help.txt\n\n \"\"\"\n out = []\n\n if not (group or category or model_list or model_format\n or model_var or quick_help or help_option or help_config):\n return False\n\n from .models import all_models_list\n\n if category:\n raise NotImplementedError\n\n if model_list:\n raise NotImplementedError\n\n system = PowerSystem()\n\n if model_format:\n if model_format.lower() == 'all':\n model_format = all_models_list\n else:\n model_format = model_format.split(',')\n\n for item in model_format:\n if item not in all_models_list:\n logger.warning('Model <{}> does not exist.'.format(item))\n model_format.remove(item)\n\n if len(model_format) > 0:\n for item in model_format:\n out.append(system.__dict__[item].doc(export=export))\n\n if model_var:\n model_var = model_var.split('.')\n\n if len(model_var) == 1:\n logger.error('Model and parameter not 
separated by dot.')\n\n elif len(model_var) > 2:\n logger.error('Model parameter not specified correctly.')\n\n else:\n dev, var = model_var\n if not hasattr(system, dev):\n logger.error('Model <{}> does not exist.'.format(dev))\n else:\n if var not in system.__dict__[dev]._data.keys():\n logger.error(\n 'Model <{}> does not have parameter <{}>.'.format(\n dev, var))\n else:\n c1 = system.__dict__[dev]._descr.get(var, 'no Description')\n c2 = system.__dict__[dev]._data.get(var)\n c3 = system.__dict__[dev]._units.get(var, 'no Unit')\n out.append('{}: {}, default = {:g} {}'.format(\n '.'.join(model_var), c1, c2, c3))\n\n if group:\n group_dict = {}\n match = []\n\n for model in all_models_list:\n g = system.__dict__[model]._group\n if g not in group_dict:\n group_dict[g] = []\n group_dict[g].append(model)\n\n if group.lower() == 'all':\n match = sorted(list(group_dict.keys()))\n\n else:\n group = [group]\n # search for ``group`` in all group names and store in ``match``\n for item in group_dict.keys():\n if group[0].lower() in item.lower():\n match.append(item)\n\n # if group name not found\n if len(match) == 0:\n out.append('Group <{:s}> not found.'.format(group[0]))\n\n for item in match:\n group_models = sorted(list(group_dict[item]))\n out.append('<{:s}>'.format(item))\n out.append(', '.join(group_models))\n out.append('')\n\n if quick_help:\n if quick_help not in all_models_list:\n out.append('Model <{}> does not exist.'.format(quick_help))\n else:\n out.append(system.__dict__[quick_help].doc(export=export))\n\n if help_option:\n raise NotImplementedError\n\n if help_config:\n\n all_config = ['system', 'pflow', 'tds', 'eig']\n\n if help_config.lower() == 'all':\n help_config = all_config\n\n else:\n help_config = help_config.split(',')\n\n for item in help_config:\n if item.lower() not in all_config:\n logger.warning('Config <{}> does not exist.'.format(item))\n help_config.remove(item)\n\n if len(help_config) > 0:\n for item in help_config:\n if item == 
'system':\n out.append(system.config.doc(export=export))\n else:\n out.append(system.__dict__[item.lower()].config.doc(\n export=export))\n\n print('\\n'.join(out)) # NOQA\n\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef edit_conf(edit_config=False, load_config=None, **kwargs):\n ret = False\n\n # no `edit-config` supplied\n if edit_config == '':\n return ret\n\n conf_path = misc.get_config_load_path(load_config)\n\n if conf_path is not None:\n logger.info('Editing config file {}'.format(conf_path))\n\n if edit_config is None:\n # use the following default editors\n if platform.system() == 'Linux':\n editor = os.environ.get('EDITOR', 'gedit')\n elif platform.system() == 'Darwin':\n editor = os.environ.get('EDITOR', 'vim')\n elif platform.system() == 'Windows':\n editor = 'notepad.exe'\n else:\n # use `edit_config` as default editor\n editor = edit_config\n\n call([editor, conf_path])\n ret = True\n\n else:\n logger.info('Config file does not exist. 
Save config with \\'andes '\n '--save-config\\'')\n ret = True\n\n return ret", "response": "Edit the Andes config file which occurs first in the search path."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef remove_output(clean=False, **kwargs):\n if not clean:\n return False\n\n found = False\n cwd = os.getcwd()\n\n for file in os.listdir(cwd):\n if file.endswith('_eig.txt') or \\\n file.endswith('_out.txt') or \\\n file.endswith('_out.lst') or \\\n file.endswith('_out.dat') or \\\n file.endswith('_prof.txt'):\n found = True\n try:\n os.remove(file)\n logger.info('<{:s}> removed.'.format(file))\n except IOError:\n logger.error('Error removing file <{:s}>.'.format(file))\n if not found:\n logger.info('no output found in the working directory.')\n\n return True", "response": "Removes the output files generated by Andes including power flow reports time - domain list and data."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef search(search, **kwargs):\n\n from .models import all_models\n out = []\n\n if not search:\n return out\n\n keys = sorted(list(all_models.keys()))\n\n for key in keys:\n vals = all_models[key]\n val = list(vals.keys())\n val = sorted(val)\n\n for item in val:\n if search.lower() in item.lower():\n out.append(key + '.' 
+ item)\n\n if out:\n print('Search result: containing <{}>'\n .format(search))\n print(' '.join(out))\n else:\n print('No model containing <{:s}> found'.format(search))\n\n return out", "response": "Search for models whose names match the given pattern."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef save_config(save_config='', **kwargs):\n ret = False\n cf_path = save_config\n\n # no ``--save-config ``\n if cf_path == '':\n return ret\n\n if cf_path is None:\n cf_path = 'andes.conf'\n home = str(pathlib.Path.home())\n\n path = os.path.join(home, '.andes')\n if not os.path.exists(path):\n os.makedirs(path)\n\n cf_path = os.path.join(path, cf_path)\n\n ps = PowerSystem()\n ps.dump_config(cf_path)\n ret = True\n\n return ret", "response": "Save the Andes config to a file at the path specified by save_config."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef main():\n t0, s = elapsed()\n\n # parser command line arguments\n args = vars(cli_new())\n\n # configure stream handler verbose level\n config_logger(log_path=misc.get_log_dir(), stream_level=args['verbose'])\n\n # show preamble\n preamble()\n\n logger.debug('command line arguments:')\n logger.debug(pprint.pformat(args))\n\n if andeshelp(**args) or search(**args) or edit_conf(**args) or remove_output(**args) \\\n or save_config(**args):\n return\n\n # process input files\n if len(args['filename']) == 0:\n logger.info('error: no input file. 
Try \\'andes -h\\' for help.')\n\n # preprocess cli args\n path = args.get('input_path', os.getcwd())\n ncpu = args['ncpu']\n if ncpu == 0 or ncpu > os.cpu_count():\n ncpu = os.cpu_count()\n\n cases = []\n\n for file in args['filename']:\n # use absolute path for cases which will be respected by FileMan\n full_paths = os.path.abspath(os.path.join(path, file))\n found = glob.glob(full_paths)\n if len(found) == 0:\n logger.info('error: file {} does not exist.'.format(full_paths))\n else:\n cases += found\n\n # remove folders and make cases unique\n cases = list(set(cases))\n valid_cases = []\n for case in cases:\n if os.path.isfile(case):\n valid_cases.append(case)\n\n logger.debug('Found files: ' + pprint.pformat(valid_cases))\n\n if len(valid_cases) <= 0:\n pass\n elif len(valid_cases) == 1:\n run(valid_cases[0], **args)\n else:\n # set verbose level for multi processing\n logger.info('Processing {} jobs on {} CPUs'.format(len(valid_cases), ncpu))\n logger.handlers[1].setLevel(logging.WARNING)\n\n # start processes\n jobs = []\n for idx, file in enumerate(valid_cases):\n args['pid'] = idx\n job = Process(\n name='Process {0:d}'.format(idx),\n target=run,\n args=(file, ),\n kwargs=args)\n jobs.append(job)\n job.start()\n\n start_msg = 'Process {:d} <{:s}> started.'.format(idx, file)\n print(start_msg)\n logger.debug(start_msg)\n\n if (idx % ncpu == ncpu - 1) or (idx == len(valid_cases) - 1):\n sleep(0.1)\n for job in jobs:\n job.join()\n jobs = []\n\n # restore command line output when all jobs are done\n logger.handlers[1].setLevel(logging.INFO)\n\n t0, s0 = elapsed(t0)\n\n if len(valid_cases) == 1:\n logger.info('-> Single process finished in {:s}.'.format(s0))\n elif len(valid_cases) >= 2:\n logger.info('-> Multiple processes finished in {:s}.'.format(s0))\n\n return", "response": "This function is the main function of the Andes command - line tool. 
It executes the main function of the Andes command - line tool."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nbuilds call validity vector for each device", "response": "def build_vec(self):\n \"\"\"build call validity vector for each device\"\"\"\n for item in all_calls:\n self.__dict__[item] = []\n\n for dev in self.devices:\n for item in all_calls:\n if self.system.__dict__[dev].n == 0:\n val = False\n else:\n val = self.system.__dict__[dev].calls.get(item, False)\n self.__dict__[item].append(val)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nbuilding call string for each device", "response": "def build_strings(self):\n \"\"\"build call string for each device\"\"\"\n for idx, dev in enumerate(self.devices):\n header = 'system.' + dev\n self.gcalls[idx] = header + '.gcall(system.dae)\\n'\n self.fcalls[idx] = header + '.fcall(system.dae)\\n'\n self.gycalls[idx] = header + '.gycall(system.dae)\\n'\n self.fxcalls[idx] = header + '.fxcall(system.dae)\\n'\n self.jac0s[idx] = header + '.jac0(system.dae)\\n'"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _compile_newton(self):\n string = '\"\"\"\\n'\n\n # evaluate algebraic equations g and differential equations f\n string += 'system.dae.init_fg()\\n'\n for pflow, gcall, call in zip(self.pflow, self.gcall, self.gcalls):\n if pflow and gcall:\n string += call\n string += 'system.dae.reset_small_g()\\n'\n string += '\\n'\n for pflow, fcall, call in zip(self.pflow, self.fcall, self.fcalls):\n if pflow and fcall:\n string += call\n string += 'system.dae.reset_small_f()\\n'\n\n # handle islanded buses in algebraic equations\n string += self.gisland\n string += '\\n'\n\n # rebuild constant Jacobian elements if factorization needed\n string += 'if system.dae.factorize:\\n'\n string += ' system.dae.init_jac0()\\n'\n for pflow, jac0, call in zip(self.pflow, self.jac0, self.jac0s):\n if pflow and jac0:\n 
string += ' ' + call\n string += ' system.dae.temp_to_spmatrix(\\'jac0\\')\\n'\n\n # evaluate Jacobians Gy and Fx\n string += 'system.dae.setup_FxGy()\\n'\n for pflow, gycall, call in zip(self.pflow, self.gycall, self.gycalls):\n if pflow and gycall:\n string += call\n for pflow, fxcall, call in zip(self.pflow, self.fxcall, self.fxcalls):\n if pflow and fxcall:\n string += call\n\n # handle islanded buses in the Jacobian\n string += self.gyisland\n string += 'system.dae.temp_to_spmatrix(\\'jac\\')\\n'\n\n string += '\"\"\"'\n if SHOW_PF_CALL:\n logger.debug(string)\n self.newton = compile(eval(string), '', 'exec')", "response": "Compile the Newton power flow execution."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _compile_fdpf(self):\n string = '\"\"\"\\n'\n string += 'system.dae.init_g()\\n'\n for pflow, gcall, call in zip(self.pflow, self.gcall, self.gcalls):\n if pflow and gcall:\n string += call\n string += 'system.dae.reset_small_g()\\n'\n string += '\\n'\n string += '\"\"\"'\n self.fdpf = compile(eval(string), '', 'exec')", "response": "Compile the fast decoupled power flow execution"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncompiles the load S_gen - S_line - S_shunt - S_load - S_stagen - S_gen - S_line - S_load - S_gen - S_line - S_shunt - S_load - S_stagen - S_load", "response": "def _compile_pfload(self):\n \"\"\"Post power flow computation for load\n S_gen + S_line + [S_shunt - S_load] = 0\n \"\"\"\n string = '\"\"\"\\n'\n string += 'system.dae.init_g()\\n'\n for gcall, pflow, shunt, stagen, call in zip(\n self.gcall, self.pflow, self.shunt, self.stagen, self.gcalls):\n if gcall and pflow and shunt and not stagen:\n string += call\n string += '\\n'\n string += 'system.dae.reset_small_g()\\n'\n string += '\"\"\"'\n self.pfload = compile(eval(string), '', 'exec')"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 
3 to\ncompile the post power flow computation for PV and SW", "response": "def _compile_pfgen(self):\n \"\"\"Post power flow computation for PV and SW\"\"\"\n string = '\"\"\"\\n'\n string += 'system.dae.init_g()\\n'\n for gcall, pflow, shunt, series, stagen, call in zip(\n self.gcall, self.pflow, self.shunt, self.series, self.stagen,\n self.gcalls):\n if gcall and pflow and (shunt or series) and not stagen:\n string += call\n string += '\\n'\n string += 'system.dae.reset_small_g()\\n'\n string += '\"\"\"'\n self.pfgen = compile(eval(string), '', 'exec')"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _compile_bus_injection(self):\n string = '\"\"\"\\n'\n for device, series in zip(self.devices, self.series):\n if series:\n string += 'system.' + device + '.gcall(system.dae)\\n'\n string += '\\n'\n string += 'system.dae.reset_small_g()\\n'\n string += self.gisland\n string += '\"\"\"'\n self.bus_injection = compile(eval(string), '', 'exec')", "response": "Compile the bus injections."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncompiling the series flow function for the series device flow", "response": "def _compile_seriesflow(self):\n \"\"\"Post power flow computation of series device flow\"\"\"\n string = '\"\"\"\\n'\n for device, pflow, series in zip(self.devices, self.pflow,\n self.series):\n if pflow and series:\n string += 'system.' 
+ device + '.seriesflow(system.dae)\\n'\n string += '\\n'\n string += '\"\"\"'\n self.seriesflow = compile(eval(string), '', 'exec')"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _compile_int(self):\n string = '\"\"\"\\n'\n\n # evaluate the algebraic equations g\n string += 'system.dae.init_fg(resetz=False)\\n'\n for gcall, call in zip(self.gcall, self.gcalls):\n if gcall:\n string += call\n string += '\\n'\n string += 'system.dae.reset_small_g()\\n'\n\n # handle islands\n string += self.gisland\n\n # evaluate differential equations f\n for fcall, call in zip(self.fcall, self.fcalls):\n if fcall:\n string += call\n string += 'system.dae.reset_small_f()\\n'\n string += '\\n'\n\n fg_string = string + '\"\"\"'\n self.int_fg = compile(eval(fg_string), '', 'exec')\n\n # rebuild constant Jacobian elements if needed\n string += 'if system.dae.factorize:\\n'\n string += ' system.dae.init_jac0()\\n'\n for jac0, call in zip(self.jac0, self.jac0s):\n if jac0:\n string += ' ' + call\n string += ' system.dae.temp_to_spmatrix(\\'jac0\\')\\n'\n\n # evaluate Jacobians Gy and Fx\n string += 'system.dae.setup_FxGy()\\n'\n for gycall, call in zip(self.gycall, self.gycalls):\n if gycall:\n string += call\n string += '\\n'\n for fxcall, call in zip(self.fxcall, self.fxcalls):\n if fxcall:\n string += call\n string += self.gyisland\n string += 'system.dae.temp_to_spmatrix(\\'jac\\')\\n'\n\n string += '\"\"\"'\n if SHOW_INT_CALL:\n logger.debug(string)\n\n self.int = compile(eval(string), '', 'exec')", "response": "compile the int and int_fg functions"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _compile_int_f(self):\n string = '\"\"\"\\n'\n string += 'system.dae.init_f()\\n'\n\n # evaluate differential equations f\n for fcall, call in zip(self.fcall, self.fcalls):\n if fcall:\n string += call\n string += 'system.dae.reset_small_f()\\n'\n string += '\"\"\"'\n self.int_f = 
compile(eval(string), '', 'exec')", "response": "Compile the int_f attribute of the object"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncompiling the int_g expression for the time domain simulation.", "response": "def _compile_int_g(self):\n \"\"\"Time Domain Simulation - update algebraic equations and Jacobian\"\"\"\n string = '\"\"\"\\n'\n\n # evaluate the algebraic equations g\n string += 'system.dae.init_g()\\n'\n for gcall, call in zip(self.gcall, self.gcalls):\n if gcall:\n string += call\n string += '\\n'\n string += 'system.dae.reset_small_g()\\n'\n\n # handle islands\n string += self.gisland\n\n # rebuild constant Jacobian elements if needed\n string += 'if system.dae.factorize:\\n'\n string += ' system.dae.init_jac0()\\n'\n for jac0, call in zip(self.jac0, self.jac0s):\n if jac0:\n string += ' ' + call\n string += ' system.dae.temp_to_spmatrix(\\'jac0\\')\\n'\n\n # evaluate Jacobians Gy\n string += 'system.dae.setup_Gy()\\n'\n for gycall, call in zip(self.gycall, self.gycalls):\n if gycall:\n string += call\n string += '\\n'\n string += self.gyisland\n string += 'system.dae.temp_to_spmatrix(\\'jac\\')\\n'\n\n string += '\"\"\"'\n self.int_g = compile(eval(string), '', 'exec')"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ninitializing the object attributes.", "response": "def _init(self):\n \"\"\"\n Convert model metadata to class attributes.\n\n This function is called automatically after ``define()`` in new\n versions.\n\n :return: None\n \"\"\"\n assert self._name\n assert self._group\n\n # self.n = 0\n self.u = []\n self.name = []\n self.idx = []\n self.uid = {}\n\n if not self._unamey:\n self._unamey = self._algebs\n else:\n assert len(self._unamey) == len(self._algebs)\n\n if not self._unamex:\n self._unamex = self._states\n else:\n assert len(self._unamex) == len(self._states)\n\n for item in self._data.keys():\n self.__dict__[item] = []\n\n for bus in self._ac.keys():\n for var in 
self._ac[bus]:\n self.__dict__[var] = []\n for node in self._dc.keys():\n for var in self._dc[node]:\n self.__dict__[var] = []\n\n for var in self._states + self._algebs + self._service:\n self.__dict__[var] = []\n\n self._flags['sysbase'] = False\n self._flags['allocate'] = False\n self._flags['address'] = False"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndefining a parameter in the model :param tomatrix: convert this parameter list to matrix :param param: parameter name :param default: parameter default value :param unit: parameter unit :param descr: description :param nonzero: is non-zero :param mandatory: is mandatory :param power: is a power value in the `self.Sn` base :param voltage: is a voltage value in the `self.Vn` base :param current: is a current value in the device base :param z: is an impedance value in the device base :param y: is an admittance value in the device base :param r: is a dc resistance value in the device base :param g: is a dc conductance value in the device base :param dccurrent: is a dc current value in the device base :param dcvoltage: is a dc votlage value in the device base :param time: is a time value in the device base :param event_time: is a variable for timed event :type param: str :type tomatrix: bool :type default: str, float :type unit: str :type descr: str :type nonzero: bool :type mandatory: bool :type power: bool :type voltage: bool :type current: bool :type z: bool :type y: bool :type r: bool :type g: bool :type dccurrent: bool :type dcvoltage: bool :type time: bool :type event_time: bool", "response": "def param_define(self,\n param,\n default,\n unit='',\n descr='',\n tomatrix=True,\n nonzero=False,\n mandatory=False,\n power=False,\n voltage=False,\n current=False,\n z=False,\n y=False,\n r=False,\n g=False,\n dccurrent=False,\n dcvoltage=False,\n time=False,\n event_time=False,\n **kwargs):\n \"\"\"\n Define a parameter in the model\n\n :param tomatrix: convert this parameter list to 
matrix\n :param param: parameter name\n :param default: parameter default value\n :param unit: parameter unit\n :param descr: description\n :param nonzero: is non-zero\n :param mandatory: is mandatory\n :param power: is a power value in the `self.Sn` base\n :param voltage: is a voltage value in the `self.Vn` base\n :param current: is a current value in the device base\n :param z: is an impedance value in the device base\n :param y: is an admittance value in the device base\n :param r: is a dc resistance value in the device base\n :param g: is a dc conductance value in the device base\n :param dccurrent: is a dc current value in the device base\n :param dcvoltage: is a dc votlage value in the device base\n :param time: is a time value in the device base\n :param event_time: is a variable for timed event\n\n :type param: str\n :type tomatrix: bool\n :type default: str, float\n :type unit: str\n :type descr: str\n :type nonzero: bool\n :type mandatory: bool\n :type power: bool\n :type voltage: bool\n :type current: bool\n :type z: bool\n :type y: bool\n :type r: bool\n :type g: bool\n :type dccurrent: bool\n :type dcvoltage: bool\n :type time: bool\n :type event_time: bool\n \"\"\"\n assert param not in self._data\n assert param not in self._algebs\n assert param not in self._states\n assert param not in self._service\n\n self._data.update({param: default})\n if unit:\n self._units.update({param: unit})\n if descr:\n self._descr.update({param: descr})\n if tomatrix:\n self._params.append(param)\n if nonzero:\n self._zeros.append(param)\n if mandatory:\n self._mandatory.append(param)\n if power:\n self._powers.append(param)\n if voltage:\n self._voltages.append(param)\n if current:\n self._currents.append(param)\n if z:\n self._z.append(param)\n if y:\n self._y.append(param)\n if r:\n self._r.append(param)\n if g:\n self._g.append(param)\n if dccurrent:\n self._dccurrents.append(param)\n if dcvoltage:\n self._dcvoltages.append(param)\n if time:\n 
self._times.append(param)\n if event_time:\n self._event_times.append(param)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndefining a variable in the model.", "response": "def var_define(self, variable, ty, fname, descr='', uname=''):\n \"\"\"\n Define a variable in the model\n\n :param fname: LaTex formatted variable name string\n :param uname: unformatted variable name string, `variable` as default\n :param variable: variable name\n :param ty: type code in ``('x', 'y')``\n :param descr: variable description\n\n :type variable: str\n :type ty: str\n :type descr: str\n :return:\n \"\"\"\n assert ty in ('x', 'y')\n if not uname:\n uname = variable\n\n if ty == 'x':\n self._states.append(variable)\n self._fnamex.append(fname)\n self._unamex.append(uname)\n if descr:\n self._states_descr.update({variable: descr})\n elif ty == 'y':\n self._algebs.append(variable)\n self._fnamey.append(fname)\n self._unamey.append(uname)\n if descr:\n self._algebs_descr.update({variable: descr})"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nadding a service variable of type ty to this model", "response": "def service_define(self, service, ty):\n \"\"\"\n Add a service variable of type ``ty`` to this model\n\n :param str service: variable name\n :param type ty: variable type\n :return: None\n \"\"\"\n\n assert service not in self._data\n assert service not in self._algebs + self._states\n\n self._service.append(service)\n self._service_ty.append(ty)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the uid of the elements with the given idx", "response": "def get_uid(self, idx):\n \"\"\"\n Return the `uid` of the elements with the given `idx`\n\n :param list, matrix idx: external indices\n :type idx: list, matrix\n :return: a matrix of uid\n \"\"\"\n assert idx is not None\n\n if isinstance(idx, (int, float, str)):\n return self.uid[idx]\n\n ret = []\n for i in idx:\n tmp = 
self.uid.get(i, None)\n assert tmp is not None, (\n 'Model <{}> does not have element <{}>'.format(self._name, i))\n ret.append(self.uid[i])\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the value of the field with the specified index.", "response": "def get_field(self, field, idx=None, astype=None):\n \"\"\"\n Return `self.field` for the elements labeled by `idx`\n\n :param astype: type cast of the return value\n :param field: field name of this model\n :param idx: element indices, will be the whole list if not specified\n :return: field values\n \"\"\"\n assert astype in (None, list, matrix)\n ret = None\n\n if idx is None:\n idx = self.idx\n\n # ===================disable warning ==============================\n # if field in self._service:\n # logger.warning(\n # 'Reading service variable <{model}.{field}> could be unsafe.'\n # .format(field=field, model=self._name)\n # )\n # =================================================================\n\n uid = self.get_uid(idx)\n\n field_data = self.__dict__[field]\n\n if isinstance(field_data, matrix):\n ret = field_data[uid]\n elif isinstance(field_data, list):\n if isinstance(idx, (float, int, str)):\n ret = field_data[uid]\n else:\n ret = [field_data[x] for x in uid]\n\n if astype is not None:\n ret = astype(ret)\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nallocate empty memory for dae variable indices.", "response": "def _alloc(self):\n \"\"\"\n Allocate empty memory for dae variable indices.\n Called in device setup phase.\n\n :return: None\n \"\"\"\n nzeros = [0] * self.n\n for var in self._states:\n self.__dict__[var] = nzeros[:]\n for var in self._algebs:\n self.__dict__[var] = nzeros[:]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef data_to_dict(self, sysbase=False):\n assert isinstance(sysbase, bool)\n\n ret = {}\n\n for key in 
self.data_keys:\n if (not sysbase) and (key in self._store):\n val = self._store[key]\n else:\n val = self.__dict__[key]\n\n ret[key] = val\n\n return ret", "response": "Return the loaded model parameters as one dictionary."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef data_to_list(self, sysbase=False):\n ret = list()\n\n # for each element\n for i in range(self.n):\n # read the parameter values and put in the temp dict ``e``\n e = {}\n for key in self.data_keys:\n if sysbase and (key in self._store):\n val = self._store[key][i]\n else:\n val = self.__dict__[key][i]\n e[key] = val\n\n ret.append(e)\n\n return ret", "response": "Return the loaded model data as a list of dictionaries."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\npopulating the model parameters from a dictionary of parameters", "response": "def data_from_dict(self, data):\n \"\"\"\n Populate model parameters from a dictionary of parameters\n\n Parameters\n ----------\n data : dict\n List of parameter dictionaries\n\n Returns\n -------\n None\n \"\"\"\n\n nvars = []\n for key, val in data.items():\n self.__dict__[key].extend(val)\n\n # assure the same parameter matrix size\n if len(nvars) > 1 and len(val) != nvars[-1]:\n raise IndexError(\n 'Model <{}> parameter <{}> must have the same length'.\n format(self._name, key))\n nvars.append(len(val))\n\n # assign idx-uid mapping\n for i, idx in zip(range(self.n), self.idx):\n self.uid[idx] = i"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef data_to_df(self, sysbase=False):\n\n p_dict_comp = self.data_to_dict(sysbase=sysbase)\n self._check_pd()\n\n self.param_df = pd.DataFrame(data=p_dict_comp).set_index('idx')\n\n return self.param_df", "response": "Return a pandas. 
DataFrame of device parameters."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the current var_to_df of variables", "response": "def var_to_df(self):\n \"\"\"\n Return the current var_to_df of variables\n\n :return: pandas.DataFrame\n \"\"\"\n ret = {}\n self._check_pd()\n\n if self._flags['address'] is False:\n return pd.DataFrame.from_dict(ret)\n\n ret.update({'name': self.name})\n ret.update({'idx': self.idx})\n\n for x in self._states:\n idx = self.__dict__[x]\n ret.update({x: self.system.dae.x[idx]})\n for y in self._algebs:\n idx = self.__dict__[y]\n ret.update({y: self.system.dae.y[idx]})\n\n var_df = pd.DataFrame.from_dict(ret).set_index('idx')\n\n return var_df"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nremoves a parameter from this model", "response": "def param_remove(self, param: 'str') -> None:\n \"\"\"\n Remove a param from this model\n\n :param param: name of the parameter to be removed\n :type param: str\n \"\"\"\n for attr in self._param_attr_dicts:\n if param in self.__dict__[attr]:\n self.__dict__[attr].pop(param)\n\n for attr in self._param_attr_lists:\n if param in self.__dict__[attr]:\n self.__dict__[attr].remove(param)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef param_alter(self,\n param,\n default=None,\n unit=None,\n descr=None,\n tomatrix=None,\n nonzero=None,\n mandatory=None,\n power=None,\n voltage=None,\n current=None,\n z=None,\n y=None,\n r=None,\n g=None,\n dccurrent=None,\n dcvoltage=None,\n time=None,\n **kwargs):\n \"\"\"\n Set attribute of an existing parameter.\n To be used to alter an attribute inherited from parent models.\n\n See .. 
self.param_define for argument descriptions.\n \"\"\"\n assert param in self._data, \\\n 'parameter <{}> does not exist in {}'.format(param, self._name)\n\n def alter_attr(p, attr, value):\n \"\"\"Set self.__dict__[attr] for param based on value\n \"\"\"\n if value is None:\n return\n elif (value is True) and (p not in self.__dict__[attr]):\n self.__dict__[attr].append(p)\n elif (value is False) and (p in self.__dict__[attr]):\n self.__dict__[attr].remove(p)\n else:\n self.log('No need to alter {} for {}'.format(attr, p))\n\n if default is not None:\n self._data.update({param: default})\n if unit is not None:\n self._units.update({param: unit})\n if descr is not None:\n self._descr.update({param: descr})\n\n alter_attr(param, '_params', tomatrix)\n alter_attr(param, '_zeros', nonzero)\n alter_attr(param, '_mandatory', mandatory)\n alter_attr(param, '_powers', power)\n alter_attr(param, '_voltages', voltage)\n alter_attr(param, '_currents', current)\n alter_attr(param, '_z', z)\n alter_attr(param, '_y', y)\n alter_attr(param, '_r', r)\n alter_attr(param, '_g', g)\n alter_attr(param, '_dcvoltages', dcvoltage)\n alter_attr(param, '_dccurrents', dccurrent)\n alter_attr(param, '_times', time)", "response": "Modify an existing parameter in the object."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nadd an equation to the model.", "response": "def eq_add(self, expr, var, intf=False):\n \"\"\"\n Add an equation to this model.\n\n An equation is associated with the addresses of a variable. 
The\n number of equations must equal that of variables.\n\n Stored to ``self._equations`` is a tuple of ``(expr, var, intf,\n ty)`` where ``ty`` is in ('f', 'g')\n\n :param str expr: equation expression\n :param str var: variable name to be associated with\n :param bool intf: if this equation is added to an interface equation,\n namely, an equation outside this model\n\n :return: None\n\n \"\"\"\n\n # determine the type of the equation, ('f', 'g') based on var\n\n # We assume that all differential equations are only modifiable by\n # the model itself\n # Only interface to algebraic varbailes of external models\n\n ty = ''\n if var in self._algebs:\n ty = 'g'\n elif var in self._states:\n ty = 'f'\n else:\n for _, item in self._ac:\n if var in item:\n ty = 'g'\n if intf is False:\n intf = True\n for _, item in self._dc:\n if var == item:\n ty = 'g'\n if intf is False:\n intf = True\n if ty == '':\n self.log(\n 'Equation associated with interface variable {var} '\n 'assumed as algeb'.\n format(var=var),\n DEBUG)\n ty = 'g'\n\n self._equations.append((expr, var, intf, ty))"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef read_data_ext(self, model: str, field: str, idx=None, astype=None):\n ret = list()\n\n if model in self.system.devman.devices:\n ret = self.system.__dict__[model].get_field(field, idx)\n\n elif model in self.system.devman.group.keys():\n # ===============================================================\n # Since ``self.system.devman.group`` is an unordered dictionary,\n # ``idx`` must be given to retrieve ``field`` across models.\n #\n # Returns a matrix by default\n # ===============================================================\n assert idx is not None, \\\n 'idx must be specified when accessing group fields'\n astype = matrix\n\n for item in idx:\n dev_name = self.system.devman.group[model].get(item, None)\n ret.append(self.read_data_ext(dev_name, field, idx=item))\n\n else:\n 
raise NameError(\n 'Model or Group <{0}> does not exist.'.format(model))\n\n if (ret is None) or isinstance(ret, (int, float, str)):\n return ret\n elif astype is None:\n return ret\n else:\n return astype(ret)", "response": "Reads a field of a given model or group."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef copy_data_ext(self, model, field, dest=None, idx=None, astype=None):\n # use default destination\n\n if not dest:\n dest = field\n assert dest not in self._states + self._algebs\n\n self.__dict__[dest] = self.read_data_ext(\n model, field, idx, astype=astype)\n\n if idx is not None:\n if len(idx) == self.n:\n self.link_to(model, idx, self.idx)", "response": "Copy the data of another model to another model."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nadds an element of this model to the internal list of elements.", "response": "def elem_add(self, idx=None, name=None, **kwargs):\n \"\"\"\n Add an element of this model\n\n :param idx: element idx\n :param name: element name\n :param kwargs: keyword arguments of the parameters\n :return: allocated idx\n \"\"\"\n\n idx = self.system.devman.register_element(dev_name=self._name, idx=idx)\n self.system.__dict__[self._group].register_element(self._name, idx)\n\n self.uid[idx] = self.n\n self.idx.append(idx)\n\n self.mdl_to.append(list())\n self.mdl_from.append(list())\n\n # self.n += 1\n\n if name is None:\n self.name.append(self._name + ' ' + str(self.n))\n else:\n self.name.append(name)\n\n # check mandatory parameters\n for key in self._mandatory:\n if key not in kwargs.keys():\n self.log(\n 'Mandatory parameter <{:s}.{:s}> missing'.format(\n self.name[-1], key), ERROR)\n sys.exit(1)\n\n # set default values\n for key, value in self._data.items():\n self.__dict__[key].append(value)\n\n # overwrite custom values\n for key, value in kwargs.items():\n if key not in self._data:\n self.log(\n 'Parameter 
<{:s}.{:s}> is not used.'.format(\n self.name[-1], key), WARNING)\n continue\n self.__dict__[key][-1] = value\n\n # check data consistency\n if not value and key in self._zeros:\n if key == 'Sn':\n default = self.system.mva\n elif key == 'fn':\n default = self.system.config.freq\n else:\n default = self._data[key]\n self.__dict__[key][-1] = default\n self.log(\n 'Using default value for <{:s}.{:s}>'.format(\n self.name[-1], key), WARNING)\n\n return idx"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef elem_remove(self, idx=None):\n if idx is not None:\n if idx in self.uid:\n key = idx\n item = self.uid[idx]\n else:\n self.log('The item <{:s}> does not exist.'.format(idx), ERROR)\n return None\n else:\n return None\n\n convert = False\n if isinstance(self.__dict__[self._params[0]], matrix):\n self._param_to_list()\n convert = True\n\n # self.n -= 1\n self.uid.pop(key, '')\n self.idx.pop(item)\n self.mdl_from.pop(item)\n self.mdl_to.pop(item)\n\n for x, y in self.uid.items():\n if y > item:\n self.uid[x] = y - 1\n\n for param in self._data:\n self.__dict__[param].pop(item)\n\n for param in self._service:\n if len(self.__dict__[param]) == (self.n + 1):\n if isinstance(self.__dict__[param], list):\n self.__dict__[param].pop(item)\n elif isinstance(self.__dict__[param], matrix):\n service = list(self.__dict__[param])\n service.pop(item)\n self.__dict__[param] = matrix(service)\n\n for x in self._states:\n if len(self.__dict__[x]):\n self.__dict__[x].pop(item)\n\n for y in self._algebs:\n if self.__dict__[y]:\n self.__dict__[y].pop(item)\n\n for key, param in self._ac.items():\n if isinstance(param, list):\n for subparam in param:\n if len(self.__dict__[subparam]):\n self.__dict__[subparam].pop(item)\n else:\n self.__dict__[param].pop(item)\n\n for key, param in self._dc.items():\n self.__dict__[param].pop(item)\n\n self.name.pop(item)\n if convert and self.n:\n self._param_to_matrix()", "response": 
"Removes elements labeled by idx from this model instance."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef data_to_sys_base(self):\n if (not self.n) or self._flags['sysbase']:\n return\n Sb = self.system.mva\n Vb = matrix([])\n if 'bus' in self._ac.keys():\n Vb = self.read_data_ext('Bus', 'Vn', idx=self.bus)\n elif 'bus1' in self._ac.keys():\n Vb = self.read_data_ext('Bus', 'Vn', idx=self.bus1)\n\n for var in self._voltages:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = mul(self.__dict__[var], self.Vn)\n self.__dict__[var] = div(self.__dict__[var], Vb)\n for var in self._powers:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = mul(self.__dict__[var], self.Sn)\n self.__dict__[var] /= Sb\n for var in self._currents:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = mul(self.__dict__[var], self.Sn)\n self.__dict__[var] = div(self.__dict__[var], self.Vn)\n self.__dict__[var] = mul(self.__dict__[var], Vb)\n self.__dict__[var] /= Sb\n if len(self._z) or len(self._y):\n Zn = div(self.Vn**2, self.Sn)\n Zb = (Vb**2) / Sb\n for var in self._z:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = mul(self.__dict__[var], Zn)\n self.__dict__[var] = div(self.__dict__[var], Zb)\n for var in self._y:\n self._store[var] = self.__dict__[var]\n if self.__dict__[var].typecode == 'd':\n self.__dict__[var] = div(self.__dict__[var], Zn)\n self.__dict__[var] = mul(self.__dict__[var], Zb)\n elif self.__dict__[var].typecode == 'z':\n self.__dict__[var] = div(self.__dict__[var], Zn + 0j)\n self.__dict__[var] = mul(self.__dict__[var], Zb + 0j)\n\n if len(self._dcvoltages) or len(self._dccurrents) or len(\n self._r) or len(self._g):\n dckey = sorted(self._dc.keys())[0]\n Vbdc = self.read_data_ext('Node', 'Vdcn', self.__dict__[dckey])\n Ib = div(Sb, Vbdc)\n Rb = div(Vbdc, Ib)\n\n for var in self._dcvoltages:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = 
mul(self.__dict__[var], self.Vdcn)\n self.__dict__[var] = div(self.__dict__[var], Vbdc)\n\n for var in self._dccurrents:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = mul(self.__dict__[var], self.Idcn)\n self.__dict__[var] = div(self.__dict__[var], Ib)\n\n for var in self._r:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = div(self.__dict__[var], Rb)\n\n for var in self._g:\n self._store[var] = self.__dict__[var]\n self.__dict__[var] = mul(self.__dict__[var], Rb)\n\n self._flags['sysbase'] = True", "response": "Converts the data to system base."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef data_to_elem_base(self):\n if self._flags['sysbase'] is False:\n return\n\n for key, val in self._store.items():\n self.__dict__[key] = val\n\n self._flags['sysbase'] = False", "response": "Convert parameter data to element base"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nretrieve the ac and dc network interface variable indices.", "response": "def _intf_network(self):\n \"\"\"\n Retrieve the ac and dc network interface variable indices.\n\n :Example:\n\n ``self._ac = {'bus1': (a1, v1)}`` gives\n - indices: self.bus1\n - system.Bus.a -> self.a1\n - system.Bus.v -> self.v1\n\n ``self._dc = {'node1': v1}`` gives\n - indices: self.node1\n - system.Node.v-> self.v1\n\n :return: None\n \"\"\"\n\n for key, val in self._ac.items():\n self.copy_data_ext(\n model='Bus', field='a', dest=val[0], idx=self.__dict__[key])\n self.copy_data_ext(\n model='Bus', field='v', dest=val[1], idx=self.__dict__[key])\n\n for key, val in self._dc.items():\n self.copy_data_ext(\n model='Node', field='v', dest=val, idx=self.__dict__[key])\n\n # check for interface voltage differences\n self._check_Vn()"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nretrieves variable indices of controlled models.", "response": "def _intf_ctrl(self):\n 
\"\"\"\n Retrieve variable indices of controlled models.\n\n Control interfaces are specified in ``self._ctrl``.\n Each ``key:value`` pair has ``key`` being the variable names\n for the reference idx and ``value`` being a tuple of\n ``(model name, field to read, destination field, return type)``.\n\n :Example:\n\n ``self._ctrl = {'syn', ('Synchronous', 'omega', 'w', list)}``\n - indices: self.syn\n - self.w = list(system.Synchronous.omega)\n\n :return: None\n \"\"\"\n\n for key, val in self._ctrl.items():\n model, field, dest, astype = val\n self.copy_data_ext(\n model, field, dest=dest, idx=self.__dict__[key], astype=astype)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _addr(self):\n group_by = self._config['address_group_by']\n\n assert not self._flags['address'], \"{} address already assigned\".format(self._name)\n assert group_by in ('element', 'variable')\n\n m0 = self.system.dae.m\n n0 = self.system.dae.n\n mend = m0 + len(self._algebs) * self.n\n nend = n0 + len(self._states) * self.n\n\n if group_by == 'variable':\n for idx, item in enumerate(self._algebs):\n self.__dict__[item] = list(\n range(m0 + idx * self.n, m0 + (idx + 1) * self.n))\n for idx, item in enumerate(self._states):\n self.__dict__[item] = list(\n range(n0 + idx * self.n, n0 + (idx + 1) * self.n))\n elif group_by == 'element':\n for idx, item in enumerate(self._algebs):\n self.__dict__[item] = list(\n range(m0 + idx, mend, len(self._algebs)))\n for idx, item in enumerate(self._states):\n self.__dict__[item] = list(\n range(n0 + idx, nend, len(self._states)))\n\n self.system.dae.m = mend\n self.system.dae.n = nend\n\n self._flags['address'] = True", "response": "Assign dae addresses for algebraic and state variables."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _varname(self):\n if not self._flags['address']:\n self.log('Unable to assign Varname before 
allocating address',\n ERROR)\n return\n\n varname = self.system.varname\n for i in range(self.n):\n iname = str(self.name[i])\n\n for e, var in enumerate(self._states):\n unamex = self._unamex[e]\n fnamex = self._fnamex[e]\n idx = self.__dict__[var][i]\n\n varname.unamex[idx] = '{} {}'.format(unamex, iname)[:33]\n varname.fnamex[idx] = '$' + '{}\\\\ {}'.format(\n fnamex, iname.replace(' ', '\\\\ '))[:33] + '$'\n\n for e, var in enumerate(self._algebs):\n unamey = self._unamey[e]\n fnamey = self._fnamey[e]\n idx = self.__dict__[var][i]\n\n varname.unamey[idx] = '{} {}'.format(unamey, iname)[:33]\n varname.fnamey[idx] = '$' + '{}\\\\ {}'.format(\n fnamey, iname.replace(' ', '\\\\ '))[:33] + '$'", "response": "Set up variable names in self. system. varname."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nconvert parameters defined in self. _params to cvxopt. matrix", "response": "def _param_to_matrix(self):\n \"\"\"\n Convert parameters defined in `self._params` to `cvxopt.matrix`\n\n :return None\n \"\"\"\n for item in self._params:\n self.__dict__[item] = matrix(self.__dict__[item], tc='d')"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nconverts parameters defined in self. 
_param to list.", "response": "def _param_to_list(self):\n \"\"\"\n Convert parameters defined in `self._param` to list\n\n :return None\n \"\"\"\n for item in self._params:\n self.__dict__[item] = list(self.__dict__[item])"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nrecord a line of log in logger", "response": "def log(self, msg, level=INFO):\n \"\"\"Record a line of log in logger\n\n :param str msg: content of the messag\n\n :param level: logging level\n :return: None\n\n \"\"\"\n logger.log(level, '<{}> - '.format(self._name) + msg)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef init_limit(self, key, lower=None, upper=None, limit=False):\n above = agtb(self.__dict__[key], upper)\n for idx, item in enumerate(above):\n if item == 0.:\n continue\n maxval = upper[idx]\n self.log(\n '{0} <{1}.{2}> above its maximum of {3}.'.format(\n self.name[idx], self._name, key, maxval), ERROR)\n if limit:\n self.__dict__[key][idx] = maxval\n\n below = altb(self.__dict__[key], lower)\n for idx, item in enumerate(below):\n if item == 0.:\n continue\n minval = lower[idx]\n self.log(\n '{0} <{1}.{2}> below its minimum of {3}.'.format(\n self.name[idx], self._name, key, minval), ERROR)\n if limit:\n self.__dict__[key][idx] = minval", "response": "reset if data is within limits. 
reset if violates"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nbuilding help document into a Texttable table", "response": "def doc(self, export='plain'):\n \"\"\"\n Build help document into a Texttable table\n\n :param ('plain', 'latex') export: export format\n :param save: save to file ``help_model.extension`` or not\n :param writemode: file write mode\n\n :return: None\n \"\"\"\n title = '<{}.{}>'.format(self._group, self._name)\n table = Tab(export=export, title=title, descr=self.__doc__)\n rows = []\n keys = sorted(self._data.keys())\n for key in keys:\n val = self._data[key]\n suf = ''\n if key in self._mandatory:\n suf = ' *'\n\n elif key in self._powers + \\\n self._voltages + \\\n self._currents + \\\n self._z + \\\n self._y + \\\n self._dccurrents + \\\n self._dcvoltages + \\\n self._r + \\\n self._g + \\\n self._times:\n suf = ' #'\n\n c1 = key + suf\n c2 = self._descr.get(key, '')\n c3 = val\n c4 = self._units.get(key, '-')\n rows.append([c1, c2, c3, c4])\n\n table.add_rows(rows, header=False)\n table.header(['Parameter', 'Description', 'Default', 'Unit'])\n\n if export == 'plain':\n pass\n elif export == 'latex':\n raise NotImplementedError('LaTex output not implemented')\n\n return table.draw()"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef check_limit(self, varname, vmin=None, vmax=None):\n retval = True\n assert varname in self.__dict__\n\n if varname in self._algebs:\n val = self.system.dae.y[self.__dict__[varname]]\n elif varname in self._states:\n val = self.system.dae.x[self.__dict__[varname]]\n else: # service or temporary variable\n val = matrix(self.__dict__[varname])\n\n vmin = matrix(self.__dict__[vmin])\n comp = altb(val, vmin)\n comp = mul(self.u, comp)\n for c, n, idx in zip(comp, self.name, range(self.n)):\n if c == 1:\n v = val[idx]\n vm = vmin[idx]\n self.log(\n 'Init of <{}.{}>={:.4g} is lower than min={:6.4g}'.format(\n n, 
varname, v, vm), ERROR)\n retval = False\n\n vmax = matrix(self.__dict__[vmax])\n comp = agtb(val, vmax)\n comp = mul(self.u, comp)\n for c, n, idx in zip(comp, self.name, range(self.n)):\n if c == 1:\n v = val[idx]\n vm = vmax[idx]\n self.log(\n 'Init of <{}.{}>={:.4g} is higher than max={:.4g}'.format(\n n, varname, v, vm), ERROR)\n retval = False\n return retval", "response": "Check if the variable values are within the limits. Return True if fails."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef on_bus(self, bus_idx):\n assert hasattr(self, 'bus')\n\n ret = []\n if isinstance(bus_idx, (int, float, str)):\n bus_idx = [bus_idx]\n\n for item in bus_idx:\n idx = []\n for e, b in enumerate(self.bus):\n if b == item:\n idx.append(self.idx[e])\n if len(idx) == 1:\n idx = idx[0]\n elif len(idx) == 0:\n idx = None\n\n ret.append(idx)\n\n if len(ret) == 1:\n ret = ret[0]\n\n return ret", "response": "Return the indices of elements connected to the given buses for shunt - connected\n elements\n\n "} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef link_bus(self, bus_idx):\n ret = []\n\n if not self._config['is_series']:\n self.log(\n 'link_bus function is not valid for non-series model <{}>'.\n format(self.name))\n return []\n\n if isinstance(bus_idx, (int, float, str)):\n bus_idx = [bus_idx]\n\n fkey = list(self._ac.keys())\n if 'bus' in fkey:\n fkey.remove('bus')\n\n nfkey = len(fkey)\n fkey_val = [self.__dict__[i] for i in fkey]\n\n for item in bus_idx:\n idx = []\n key = []\n for i in range(self.n):\n for j in range(nfkey):\n if fkey_val[j][i] == item:\n idx.append(self.idx[i])\n key.append(fkey[j])\n # <= 1 terminal should connect to the same bus\n break\n\n if len(idx) == 0:\n idx = None\n if len(key) == 0:\n key = None\n\n ret.append((idx, key))\n\n return ret", "response": "Return the indices of elements linking the given buses"} {"SOURCE": "codesearchnet", 
"instruction": "Create a Python 3 function for\nreturning the indices of elements whose field first satisfies the given values.", "response": "def elem_find(self, field, value):\n \"\"\"\n Return the indices of elements whose field first satisfies the given values\n\n ``value`` should be unique in self.field.\n This function does not check the uniqueness.\n\n :param field: name of the supplied field\n :param value: value of field of the elemtn to find\n :return: idx of the elements\n :rtype: list, int, float, str\n \"\"\"\n if isinstance(value, (int, float, str)):\n value = [value]\n\n f = list(self.__dict__[field])\n uid = np.vectorize(f.index)(value)\n return self.get_idx(uid)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _check_Vn(self):\n if hasattr(self, 'bus') and hasattr(self, 'Vn'):\n bus_Vn = self.read_data_ext('Bus', field='Vn', idx=self.bus)\n for name, bus, Vn, Vn0 in zip(self.name, self.bus, self.Vn,\n bus_Vn):\n if Vn != Vn0:\n self.log(\n '<{}> has Vn={} different from bus <{}> Vn={}.'.format(\n name, Vn, bus, Vn0), WARNING)\n\n if hasattr(self, 'node') and hasattr(self, 'Vdcn'):\n node_Vdcn = self.read_data_ext('Node', field='Vdcn', idx=self.node)\n for name, node, Vdcn, Vdcn0 in zip(self.name, self.node, self.Vdcn,\n node_Vdcn):\n if Vdcn != Vdcn0:\n self.log(\n '<{}> has Vdcn={} different from node <{}> Vdcn={}.'\n .format(name, Vdcn, node, Vdcn0), WARNING)", "response": "Check data consistency of Vn and Vdcn if connected to Bus or Node."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef link_to(self, model, idx, self_idx):\n if model in self.system.loaded_groups:\n\n # access group instance\n grp = self.system.__dict__[model]\n\n # doing it one by one\n for i, self_i in zip(idx, self_idx):\n # query model name and access model instance\n mdl_name = grp._idx_model[i]\n mdl = self.system.__dict__[mdl_name]\n\n # query 
the corresponding uid\n u = mdl.get_uid(i)\n\n # update `mdl_from`\n name_idx_pair = (self._name, self_i)\n if name_idx_pair not in mdl.mdl_from[u]:\n mdl.mdl_from[u].append(name_idx_pair)\n\n else:\n # access model instance\n mdl = self.system.__dict__[model]\n uid = mdl.get_uid(idx)\n\n for u, self_i in zip(uid, self_idx):\n name_idx_pair = (self._name, self_i)\n\n if name_idx_pair not in mdl.mdl_from[u]:\n mdl.mdl_from[u].append(name_idx_pair)", "response": "Link this object to another object in the specified model."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreset all internal storage to initial status Returns None", "response": "def reset(self):\n \"\"\"\n Reset all internal storage to initial status\n\n Returns\n -------\n None\n \"\"\"\n self.solved = False\n self.niter = 0\n self.iter_mis = []\n self.F = None\n self.system.dae.factorize = True"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef pre(self):\n logger.info('-> Power flow study: {} method, {} start'.format(\n self.config.method.upper(), 'flat' if self.config.flatstart else 'non-flat')\n )\n\n t, s = elapsed()\n\n system = self.system\n dae = self.system.dae\n\n system.dae.init_xy()\n\n for device, pflow, init0 in zip(system.devman.devices,\n system.call.pflow, system.call.init0):\n if pflow and init0:\n system.__dict__[device].init0(dae)\n\n # check for islands\n system.check_islands(show_info=True)\n\n t, s = elapsed(t)\n logger.debug('Power flow initialized in {:s}.'.format(s))", "response": "Initialize the power flow study for the current state of the current state of the current state of the system."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef run(self, **kwargs):\n ret = None\n\n # initialization Y matrix and inital guess\n self.pre()\n t, _ = elapsed()\n\n # call solution methods\n if self.config.method == 'NR':\n ret = 
self.newton()\n elif self.config.method == 'DCPF':\n ret = self.dcpf()\n elif self.config.method in ('FDPF', 'FDBX', 'FDXB'):\n ret = self.fdpf()\n\n self.post()\n _, s = elapsed(t)\n\n if self.solved:\n logger.info(' Solution converged in {} in {} iterations'.format(s, self.niter))\n else:\n logger.warning(' Solution failed in {} in {} iterations'.format(s,\n self.niter))\n return ret", "response": "run the power flow solution routine\n "} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nlogs iteration number and mismatch", "response": "def _iter_info(self, niter, level=logging.INFO):\n \"\"\"\n Log iteration number and mismatch\n\n Parameters\n ----------\n level\n logging level\n Returns\n -------\n None\n \"\"\"\n max_mis = self.iter_mis[niter - 1]\n msg = ' Iter {: tol:\n # P-theta\n da = matrix(div(system.dae.g[no_sw], system.dae.y[no_swv]))\n self.solver.solve(Bp, Fp, Np, da)\n system.dae.y[no_sw] += da\n\n exec(system.call.fdpf)\n normP = max(abs(system.dae.g[no_sw]))\n\n # Q-V\n dV = matrix(div(system.dae.g[no_gv], system.dae.y[no_gv]))\n self.solver.solve(Bpp, Fpp, Npp, dV)\n system.dae.y[no_gv] += dV\n\n exec(system.call.fdpf)\n normQ = max(abs(system.dae.g[no_gv]))\n\n err = max([normP, normQ])\n self.iter_mis.append(err)\n error = err\n\n self._iter_info(self.niter)\n self.niter += 1\n\n if self.niter > 4 and self.iter_mis[-1] > 1000 * self.iter_mis[0]:\n logger.warning('Blown up in {0} iterations.'.format(self.niter))\n self.solved = False\n break\n\n if self.niter > iter_max:\n logger.warning('Reached maximum number of iterations.')\n self.solved = False\n break\n\n return self.solved, self.niter"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nsetting initial voltage and reactive power for PQ.", "response": "def init0(self, dae):\n \"\"\"\n Set initial voltage and reactive power for PQ.\n Overwrites Bus.voltage values\n \"\"\"\n dae.y[self.v] = self.v0\n dae.y[self.q] = mul(self.u, 
self.qg)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ndisable a PV element for TDS", "response": "def disable_gen(self, idx):\n \"\"\"\n Disable a PV element for TDS\n\n Parameters\n ----------\n idx\n\n Returns\n -------\n\n \"\"\"\n self.u[self.uid[idx]] = 0\n self.system.dae.factorize = True"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsets initial p and q for power flow", "response": "def init0(self, dae):\n \"\"\"Set initial p and q for power flow\"\"\"\n self.p0 = matrix(self.p, (self.n, 1), 'd')\n self.q0 = matrix(self.q, (self.n, 1), 'd')"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nsets initial voltage for time domain simulation", "response": "def init1(self, dae):\n \"\"\"Set initial voltage for time domain simulation\"\"\"\n self.v0 = matrix(dae.y[self.v])"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nprocess incoming status message. Acquire lock for status dictionary before updating.", "response": "def update_status(self, header, message):\n \"\"\"Process incoming status message. 
Acquire lock for status dictionary before updating.\"\"\"\n with self._lock:\n if self.message_box:\n self.message_box.erase()\n self.message_box.move(0, 0)\n for n, field in enumerate(header):\n if n == 0:\n self.message_box.addstr(field + \":\", curses.color_pair(1))\n else:\n self.message_box.addstr(\n \", \" + field + \":\", curses.color_pair(1)\n )\n self.message_box.addstr(header[field])\n self.message_box.addstr(\": \", curses.color_pair(1))\n self.message_box.addstr(\n str(message), curses.color_pair(2) + curses.A_BOLD\n )\n self.message_box.refresh()\n if (\n message[\"host\"] not in self._node_status\n or int(header[\"timestamp\"])\n >= self._node_status[message[\"host\"]][\"last_seen\"]\n ):\n self._node_status[message[\"host\"]] = message\n self._node_status[message[\"host\"]][\"last_seen\"] = int(\n header[\"timestamp\"]\n )"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _redraw_screen(self, stdscr):\n with self._lock:\n stdscr.clear()\n stdscr.addstr(\n 0, 0, \"workflows service monitor -- quit with Ctrl+C\", curses.A_BOLD\n )\n stdscr.refresh()\n self.message_box = self._boxwin(\n 5, curses.COLS, 2, 0, title=\"last seen message\", color_pair=1\n )\n self.message_box.scrollok(True)\n self.cards = []", "response": "Redraw screen. 
This could be to initialize after resizing."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ndestroy cards with this number.", "response": "def _erase_card(self, number):\n \"\"\"Destroy cards with this or higher number.\"\"\"\n with self._lock:\n if number < (len(self.cards) - 1):\n self._erase_card(number + 1)\n if number > (len(self.cards) - 1):\n return\n max_cards_horiz = int(curses.COLS / 35)\n obliterate = curses.newwin(\n 6,\n 35,\n 7 + 6 * (number // max_cards_horiz),\n 35 * (number % max_cards_horiz),\n )\n obliterate.erase()\n obliterate.noutrefresh()\n del self.cards[number]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _run(self, stdscr):\n with self._lock:\n curses.use_default_colors()\n curses.curs_set(False)\n curses.init_pair(1, curses.COLOR_RED, -1)\n curses.init_pair(2, curses.COLOR_BLACK, -1)\n curses.init_pair(3, curses.COLOR_GREEN, -1)\n self._redraw_screen(stdscr)\n\n try:\n while not self.shutdown:\n now = int(time.time())\n with self._lock:\n overview = self._node_status.copy()\n cardnumber = 0\n for host, status in overview.items():\n age = now - int(status[\"last_seen\"] / 1000)\n with self._lock:\n if age > 90:\n del self._node_status[host]\n else:\n card = self._get_card(cardnumber)\n card.erase()\n card.move(0, 0)\n card.addstr(\"Host: \", curses.color_pair(3))\n card.addstr(host)\n card.move(1, 0)\n card.addstr(\"Service: \", curses.color_pair(3))\n if \"service\" in status and status[\"service\"]:\n card.addstr(status[\"service\"])\n else:\n card.addstr(\"---\", curses.color_pair(2))\n card.move(2, 0)\n card.addstr(\"State: \", curses.color_pair(3))\n if \"status\" in status:\n status_code = status[\"status\"]\n state_string = CommonService.human_readable_state.get(\n status_code, str(status_code)\n )\n state_color = None\n if status_code in (\n CommonService.SERVICE_STATUS_PROCESSING,\n CommonService.SERVICE_STATUS_TIMER,\n ):\n 
state_color = curses.color_pair(3) + curses.A_BOLD\n if status_code == CommonService.SERVICE_STATUS_IDLE:\n state_color = curses.color_pair(2) + curses.A_BOLD\n if status_code == CommonService.SERVICE_STATUS_ERROR:\n state_color = curses.color_pair(1)\n if state_color:\n card.addstr(state_string, state_color)\n else:\n card.addstr(state_string)\n card.move(3, 0)\n if age >= 10:\n card.addstr(\n \"last seen %d seconds ago\" % age,\n curses.color_pair(1)\n + (0 if age < 60 else curses.A_BOLD),\n )\n card.noutrefresh()\n cardnumber = cardnumber + 1\n if cardnumber < len(self.cards):\n with self._lock:\n self._erase_card(cardnumber)\n with self._lock:\n curses.doupdate()\n time.sleep(0.2)\n except KeyboardInterrupt:\n pass # User pressed CTRL+C\n self._transport.disconnect()", "response": "Start the actual service monitor"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef switch(self, idx, control):\n old = None\n new = None\n if control == 'Q':\n if self.PQ[idx] == 1:\n old = 'PQ'\n new = 'PV'\n elif self.vQ[idx] == 1:\n old = 'vQ'\n new = 'vV'\n elif control == 'P':\n if self.PQ[idx] == 1:\n old = 'PQ'\n new = 'vQ'\n elif self.PV[idx] == 1:\n old = 'PV'\n new = 'vV'\n elif control == 'V':\n if self.PV[idx] == 1:\n old = 'PV'\n new = 'PQ'\n elif self.vV[idx] == 1:\n old = 'vV'\n new = 'vQ'\n elif control == 'v':\n if self.vQ[idx] == 1:\n old = 'vQ'\n new = 'PQ'\n elif self.vV[idx] == 1:\n old = 'vV'\n new = 'PV'\n if old and new:\n self.__dict__[old][idx] = 0\n self.__dict__[new][idx] = 1", "response": "Switch a single control of idx"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef disable(self, idx):\n if idx not in self.uid.keys():\n self.log('Element index {0} does not exist.'.format(idx))\n return\n self.u[self.uid[idx]] = 0", "response": "Disable an element and reset the outputs"} {"SOURCE": "codesearchnet", "instruction": "Create a 
Python 3 function to\nreturn the alternative values of an option", "response": "def get_alt(self, option):\n \"\"\"\n Return the alternative values of an option\n\n Parameters\n ----------\n option: str\n option name\n\n Returns\n -------\n str\n a string of alternative options\n\n \"\"\"\n assert hasattr(self, option)\n\n alt = option + '_alt'\n if not hasattr(self, alt):\n return ''\n return ', '.join(self.__dict__[alt])"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef doc(self, export='plain'):\n rows = []\n title = '<{:s}> config options'.format(self.__class__.__name__)\n table = Tab(export=export, title=title)\n\n for opt in sorted(self.config_descr):\n if hasattr(self, opt):\n c1 = opt\n c2 = self.config_descr[opt]\n c3 = self.__dict__.get(opt, '')\n c4 = self.get_alt(opt)\n rows.append([c1, c2, c3, c4])\n else:\n print('Setting {:s} has no {:s} option. Correct in config_descr.'.\n format(self.__class__.__name__, opt))\n\n table.add_rows(rows, header=False)\n table.header(['Option', 'Description', 'Value', 'Alt.'])\n\n return table.draw()", "response": "Dump help document for setting classes"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef dump_conf(self, conf=None):\n if conf is None:\n conf = configparser.ConfigParser()\n\n tab = self.__class__.__name__\n\n conf[tab] = {}\n\n for key, val in self.__dict__.items():\n if key.endswith('_alt'):\n continue\n conf[tab][key] = str(val)\n\n return conf", "response": "Dump settings to an rc config file."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nload configurations from an rc file and update the object with the values.", "response": "def load_config(self, conf):\n \"\"\"\n Load configurations from an rc file\n\n Parameters\n ----------\n rc: str\n path to the rc file\n\n Returns\n -------\n None\n \"\"\"\n section = self.__class__.__name__\n\n if 
section not in conf.sections():\n logger.debug('Config section {} not in rc file'.format(\n self.__class__.__name__))\n return\n\n for key in conf[section].keys():\n if not hasattr(self, key):\n logger.debug('Config key {}.{} skipped'.format(section, key))\n continue\n\n val = conf[section].get(key)\n\n try:\n val = conf[section].getfloat(key)\n except ValueError:\n try:\n val = conf[section].getboolean(key)\n except ValueError:\n pass\n\n self.__dict__.update({key: val})\n\n self.check()"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nrecord a status change with a current timestamp.", "response": "def update_status(self, new_status):\n \"\"\"Record a status change with a current timestamp.\"\"\"\n timestamp = time.time()\n self.status_history[-1][\"end\"] = timestamp\n self.status_history.append(\n {\"start\": timestamp, \"end\": None, \"status\": new_status}\n )"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns a dictionary of different status codes and the percentage of time spent in each throughout the last summation_period seconds. 
Truncate the aggregated history appropriately.", "response": "def report(self):\n \"\"\"Return a dictionary of different status codes and the percentage of time\n spent in each throughout the last summation_period seconds.\n Truncate the aggregated history appropriately.\"\"\"\n timestamp = time.time()\n cutoff = timestamp - self.period\n truncate = 0\n summary = {}\n for event in self.status_history[:-1]:\n if event[\"end\"] < cutoff:\n truncate = truncate + 1\n continue\n summary[event[\"status\"]] = (\n summary.get(event[\"status\"], 0)\n + event[\"end\"]\n - max(cutoff, event[\"start\"])\n )\n summary[self.status_history[-1][\"status\"]] = (\n summary.get(self.status_history[-1][\"status\"], 0)\n + timestamp\n - max(cutoff, self.status_history[-1][\"start\"])\n )\n if truncate:\n self.status_history = self.status_history[truncate:]\n total_duration = sum(summary.values())\n summary = {s: round(d / total_duration, 4) for s, d in summary.items()}\n return summary"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ngenerate a unique ID that is somewhat guaranteed to be unique among all instances running at the same time.", "response": "def generate_unique_host_id():\n \"\"\"Generate a unique ID, that is somewhat guaranteed to be unique among all\n instances running at the same time.\"\"\"\n host = \".\".join(reversed(socket.gethostname().split(\".\")))\n pid = os.getpid()\n return \"%s.%d\" % (host, pid)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef data_to_sys_base(self):\n if not self.n or self._flags['sysbase'] is True:\n return\n\n self.copy_data_ext(model='Synchronous', field='Sn', dest='Sn', idx=self.gen)\n super(GovernorBase, self).data_to_sys_base()\n self._store['R'] = self.R\n\n self.R = self.system.mva * div(self.R, self.Sn)", "response": "Custom system base conversion function"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, 
explain what it does\ndef data_to_elem_base(self):\n if not self.n or self._flags['sysbase'] is False:\n return\n self.R = mul(self.R, self.Sn) / self.system.mva\n super(GovernorBase, self).data_to_elem_base()", "response": "Custom system base unconversion function"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nadd a suffix to a full file name", "response": "def add_suffix(fullname, suffix):\n \"\"\" Add suffix to a full file name\"\"\"\n name, ext = os.path.splitext(fullname)\n return name + '_' + suffix + ext"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the original full path if full path is specified otherwise return the original full path", "response": "def get_fullpath(self, fullname=None, relative_to=None):\n \"\"\"\n Return the original full path if full path is specified, otherwise\n search in the case file path\n \"\"\"\n # if is an empty path\n if not fullname:\n return fullname\n\n isabs = os.path.isabs(fullname)\n\n path, name = os.path.split(fullname)\n\n if not name: # path to a folder\n return None\n else: # path to a file\n if isabs:\n return fullname\n else:\n return os.path.join(self.case_path, path, name)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nswitch if time for eAgc has come", "response": "def switch(self):\n \"\"\"Switch if time for eAgc has come\"\"\"\n t = self.system.dae.t\n for idx in range(0, self.n):\n if t >= self.tl[idx]:\n if self.en[idx] == 0:\n self.en[idx] = 1\n logger.info(\n 'Extended ACE <{}> activated at t = {}.'.format(\n self.idx[idx], t))"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a list of command - line interface names by querying hook in all_pkg.", "response": "def get_command(all_pkg, hook):\n \"\"\"\n Collect the command-line interface names by querying ``hook`` in ``all_pkg``\n\n Parameters\n ----------\n all_pkg: list\n list of package files\n 
hook: str\n A variable where the command is stored. ``__cli__`` by default.\n Returns\n -------\n list\n \"\"\"\n ret = []\n for r in all_pkg:\n module = importlib.import_module(__name__ + '.' + r.lower())\n ret.append(getattr(module, hook))\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_times(self):\n if not self.n:\n return list()\n\n ret = list()\n\n for item in self._event_times:\n ret += list(self.__dict__[item])\n\n return ret + list(matrix(ret) - 1e-6)", "response": "Return a list of occurrance times of the events\n "} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef apply(self, sim_time):\n if not self.n:\n return\n\n # skip if already applied\n if self._last_time != sim_time:\n self._last_time = sim_time\n else:\n return", "response": "Apply the event and the related attributes."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nbuild the transmission line admittance matrix into self. 
Y.", "response": "def build_y(self):\n \"\"\"Build transmission line admittance matrix into self.Y\"\"\"\n if not self.n:\n return\n self.y1 = mul(self.u, self.g1 + self.b1 * 1j)\n self.y2 = mul(self.u, self.g2 + self.b2 * 1j)\n self.y12 = div(self.u, self.r + self.x * 1j)\n self.m = polar(self.tap, self.phi * deg2rad)\n self.m2 = abs(self.m)**2\n self.mconj = conj(self.m)\n\n # build self and mutual admittances into Y\n self.Y = spmatrix(\n div(self.y12 + self.y1, self.m2), self.a1, self.a1,\n (self.nb, self.nb), 'z')\n self.Y -= spmatrix(\n div(self.y12, self.mconj), self.a1, self.a2, (self.nb, self.nb),\n 'z')\n self.Y -= spmatrix(\n div(self.y12, self.m), self.a2, self.a1, (self.nb, self.nb), 'z')\n self.Y += spmatrix(self.y12 + self.y2, self.a2, self.a2,\n (self.nb, self.nb), 'z')"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef build_b(self):\n if not self.n:\n return\n method = self.system.pflow.config.method.lower()\n\n # Build B prime matrix\n y1 = mul(\n self.u, self.g1\n ) # y1 neglects line charging shunt, and g1 is usually 0 in HV lines\n y2 = mul(\n self.u, self.g2\n ) # y2 neglects line charging shunt, and g2 is usually 0 in HV lines\n m = polar(1.0, self.phi * deg2rad) # neglected tap ratio\n self.mconj = conj(m)\n m2 = matrix(1.0, (self.n, 1), 'z')\n if method in ('fdxb', 'dcpf'):\n # neglect line resistance in Bp in XB method\n y12 = div(self.u, self.x * 1j)\n else:\n y12 = div(self.u, self.r + self.x * 1j)\n self.Bp = spmatrix(\n div(y12 + y1, m2), self.a1, self.a1, (self.nb, self.nb), 'z')\n self.Bp -= spmatrix(\n div(y12, conj(m)), self.a1, self.a2, (self.nb, self.nb), 'z')\n self.Bp -= spmatrix(\n div(y12, m), self.a2, self.a1, (self.nb, self.nb), 'z')\n self.Bp += spmatrix(y12 + y2, self.a2, self.a2, (self.nb, self.nb),\n 'z')\n self.Bp = self.Bp.imag()\n\n # Build B double prime matrix\n y1 = mul(\n self.u, self.g1 + self.b1 * 1j\n ) # y1 neglected line charging shunt, and g1 is 
usually 0 in HV lines\n y2 = mul(\n self.u, self.g2 + self.b2 * 1j\n ) # y2 neglected line charging shunt, and g2 is usually 0 in HV lines\n m = self.tap + 0j # neglected phase shifter\n m2 = abs(m)**2 + 0j\n\n if method in ('fdbx', 'fdpf', 'dcpf'):\n # neglect line resistance in Bpp in BX method\n y12 = div(self.u, self.x * 1j)\n else:\n y12 = div(self.u, self.r + self.x * 1j)\n self.Bpp = spmatrix(\n div(y12 + y1, m2), self.a1, self.a1, (self.nb, self.nb), 'z')\n self.Bpp -= spmatrix(\n div(y12, conj(m)), self.a1, self.a2, (self.nb, self.nb), 'z')\n self.Bpp -= spmatrix(\n div(y12, m), self.a2, self.a1, (self.nb, self.nb), 'z')\n self.Bpp += spmatrix(y12 + y2, self.a2, self.a2, (self.nb, self.nb),\n 'z')\n self.Bpp = self.Bpp.imag()\n\n for item in range(self.nb):\n if abs(self.Bp[item, item]) == 0:\n self.Bp[item, item] = 1e-6 + 0j\n if abs(self.Bpp[item, item]) == 0:\n self.Bpp[item, item] = 1e-6 + 0j", "response": "build Bp and Bpp for fast decoupled method"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef incidence(self):\n self.C = \\\n spmatrix(self.u, range(self.n), self.a1, (self.n, self.nb), 'd') -\\\n spmatrix(self.u, range(self.n), self.a2, (self.n, self.nb), 'd')", "response": "Build incidence matrix into self. 
C"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nchecks connectivity of a bus using Goderya s algorithm", "response": "def connectivity(self, bus):\n \"\"\"check connectivity of network using Goderya's algorithm\"\"\"\n if not self.n:\n return\n n = self.nb\n fr = self.a1\n to = self.a2\n os = [0] * self.n\n\n # find islanded buses\n diag = list(\n matrix(\n spmatrix(self.u, to, os, (n, 1), 'd') +\n spmatrix(self.u, fr, os, (n, 1), 'd')))\n nib = bus.n_islanded_buses = diag.count(0)\n bus.islanded_buses = []\n for idx in range(n):\n if diag[idx] == 0:\n bus.islanded_buses.append(idx)\n\n # find islanded areas\n temp = spmatrix(\n list(self.u) * 4, fr + to + fr + to, to + fr + fr + to, (n, n),\n 'd')\n cons = temp[0, :]\n nelm = len(cons.J)\n conn = spmatrix([], [], [], (1, n), 'd')\n bus.island_sets = []\n idx = islands = 0\n enum = 0\n\n while 1:\n while 1:\n cons = cons * temp\n cons = sparse(cons) # remove zero values\n new_nelm = len(cons.J)\n if new_nelm == nelm:\n break\n nelm = new_nelm\n if len(cons.J) == n: # all buses are interconnected\n return\n bus.island_sets.append(list(cons.J))\n conn += cons\n islands += 1\n nconn = len(conn.J)\n if nconn >= (n - nib):\n bus.island_sets = [i for i in bus.island_sets if i != []]\n break\n\n for element in conn.J[idx:]:\n if not diag[idx]:\n enum += 1 # skip islanded buses\n if element <= enum:\n idx += 1\n enum += 1\n else:\n break\n\n cons = temp[enum, :]"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nbuild line Jacobian matrix", "response": "def build_gy(self, dae):\n \"\"\"Build line Jacobian matrix\"\"\"\n if not self.n:\n idx = range(dae.m)\n dae.set_jac(Gy, 1e-6, idx, idx)\n return\n\n Vn = polar(1.0, dae.y[self.a])\n Vc = mul(dae.y[self.v], Vn)\n Ic = self.Y * Vc\n\n diagVn = spdiag(Vn)\n diagVc = spdiag(Vc)\n diagIc = spdiag(Ic)\n\n dS = self.Y * diagVn\n dS = diagVc * conj(dS)\n dS += conj(diagIc) * diagVn\n\n dR = 
diagIc\n dR -= self.Y * diagVc\n dR = diagVc.H.T * dR\n\n self.gy_store = sparse([[dR.imag(), dR.real()], [dS.real(),\n dS.imag()]])\n\n return self.gy_store"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncompute the flow through the line after solving PF.", "response": "def seriesflow(self, dae):\n \"\"\"\n Compute the flow through the line after solving PF.\n\n Compute terminal injections, line losses\n \"\"\"\n\n # Vm = dae.y[self.v]\n # Va = dae.y[self.a]\n # V1 = polar(Vm[self.a1], Va[self.a1])\n # V2 = polar(Vm[self.a2], Va[self.a2])\n\n I1 = mul(self.v1, div(self.y12 + self.y1, self.m2)) - \\\n mul(self.v2, div(self.y12, self.mconj))\n I2 = mul(self.v2, self.y12 + self.y2) - \\\n mul(self.v2, div(self.y12, self.m))\n\n self.I1_real = I1.real()\n self.I1_imag = I1.imag()\n self.I2_real = I2.real()\n self.I2_imag = I2.imag()\n\n self.S1 = mul(self.v1, conj(I1))\n self.S2 = mul(self.v2, conj(I2))\n\n self.P1 = self.S1.real()\n self.P2 = self.S2.real()\n self.Q1 = self.S1.imag()\n self.Q2 = self.S2.imag()\n\n self.chg1 = mul(self.g1 + 1j * self.b1, div(self.v1**2, self.m2))\n self.chg2 = mul(self.g2 + 1j * self.b2, self.v2**2)\n\n self.Pchg1 = self.chg1.real()\n self.Pchg2 = self.chg2.real()\n\n self.Qchg1 = self.chg1.imag()\n self.Qchg2 = self.chg2.imag()\n\n self._line_flows = matrix([self.P1, self.P2, self.Q1, self.Q2,\n self.I1_real, self.I1_imag,\n self.I2_real, self.I2_imag])"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns voltage phasors at the from buses ( bus1", "response": "def v1(self):\n \"\"\"Return voltage phasors at the \"from buses\" (bus1)\"\"\"\n Vm = self.system.dae.y[self.v]\n Va = self.system.dae.y[self.a]\n return polar(Vm[self.a1], Va[self.a1])"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns voltage phasors at the to buses ( bus1", "response": "def v2(self):\n \"\"\"Return voltage phasors at the \"to 
buses\" (bus2)\"\"\"\n Vm = self.system.dae.y[self.v]\n Va = self.system.dae.y[self.a]\n return polar(Vm[self.a2], Va[self.a2])"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef switch(self, idx, u):\n self.u[self.uid[idx]] = u\n self.rebuild = True\n self.system.dae.factorize = True\n logger.debug(' Status switch to {} on idx {}.'.format(u, idx))", "response": "switch the status of Line idx"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nbuilding variable names for the current object.", "response": "def _varname_flow(self):\n \"\"\"Build variable names for Pij, Pji, Qij, Qji, Sij, Sji\"\"\"\n if not self.n:\n return\n\n mpq = self.system.dae.m + 2 * self.system.Bus.n\n nl = self.n\n\n # Pij\n xy_idx = range(mpq, mpq + nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Pij',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='P_{ij}',\n element_name=self.name)\n\n # Pji\n xy_idx = range(mpq + nl, mpq + 2 * nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Pji',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='P_{ji}',\n element_name=self.name)\n\n # Qij\n xy_idx = range(mpq + 2 * nl, mpq + 3 * nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Qij',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='Q_{ij}',\n element_name=self.name)\n\n # Qji\n xy_idx = range(mpq + 3 * nl, mpq + 4 * nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Qji',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='Q_{ji}',\n element_name=self.name)\n\n # Iij Real\n xy_idx = range(mpq + 4 * nl, mpq + 5 * nl)\n self.system.varname.append(\n 
listname='unamey',\n xy_idx=xy_idx,\n var_name='Iij_Re',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='\\\\Re(I_{ij})',\n element_name=self.name)\n\n # Iij Imag\n xy_idx = range(mpq + 5 * nl, mpq + 6 * nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Iij_Im',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='\\\\Im(I_{ij})',\n element_name=self.name)\n\n # Iji Real\n xy_idx = range(mpq + 6 * nl, mpq + 7 * nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Iji_Re',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='\\\\Re(I_{ji})',\n element_name=self.name)\n\n # Iji Imag\n xy_idx = range(mpq + 7 * nl, mpq + 8 * nl)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Iji_Im',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='\\\\Im(I_{ji})',\n element_name=self.name)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn the seriesflow based on the external idx on the bus side", "response": "def get_flow_by_idx(self, idx, bus):\n \"\"\"Return seriesflow based on the external idx on the `bus` side\"\"\"\n P, Q = [], []\n if type(idx) is not list:\n idx = [idx]\n if type(bus) is not list:\n bus = [bus]\n\n for line_idx, bus_idx in zip(idx, bus):\n line_int = self.uid[line_idx]\n if bus_idx == self.bus1[line_int]:\n P.append(self.P1[line_int])\n Q.append(self.Q1[line_int])\n elif bus_idx == self.bus2[line_int]:\n P.append(self.P2[line_int])\n Q.append(self.Q2[line_int])\n return matrix(P), matrix(Q)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning leaf bus idx line idx and line foreign key", "response": "def leaf_bus(self, df=False):\n \"\"\"\n Return leaf bus idx, line idx, and the 
line foreign key\n\n Returns\n -------\n (list, list, list) or DataFrame\n\n \"\"\"\n # leafs - leaf bus idx\n # lines - line idx\n # fkey - the foreign key of Line, in 'bus1' or 'bus2', linking the bus\n\n leafs, lines, fkeys = list(), list(), list()\n\n # convert to unique, ordered list\n buses = sorted(list(set(self.bus1 + self.bus2)))\n\n links = self.link_bus(buses)\n\n for bus, link in zip(buses, links):\n line = link[0]\n fkey = link[1]\n if line is None:\n continue\n if len(line) == 1:\n leafs.append(bus)\n lines.extend(line)\n fkeys.extend(fkey)\n\n # output formatting\n if df is False:\n return leafs, lines, fkeys\n else:\n _data = {'Bus idx': leafs, 'Line idx': lines, 'fkey': fkeys}\n if globals()['pd'] is None:\n globals()['pd'] = importlib.import_module('pandas')\n return pd.DataFrame(data=_data)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ntruncate the text into a list of strings of length length.", "response": "def truncate(text, length=255):\n \"\"\"\n Splits the message into a list of strings of of length `length`\n\n Args:\n text (str): The text to be divided\n length (int, optional): The length of the chunks of text. 
\\\n Defaults to 255.\n\n Returns:\n list: Text divided into chunks of length `length`\n \"\"\"\n\n lines = []\n i = 0\n while i < len(text) - 1:\n try:\n lines.append(text[i:i+length])\n i += length\n\n except IndexError as e:\n lines.append(text[i:])\n return lines"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef resize_for_flows(self):\n if self.system.config.dime_enable:\n self.system.tds.config.compute_flows = True\n\n if self.system.tds.config.compute_flows:\n nflows = 2 * self.system.Bus.n + \\\n 8 * self.system.Line.n + \\\n 2 * self.system.Area.n_combination\n self.unamey.extend([''] * nflows)\n self.fnamey.extend([''] * nflows)", "response": "Extend unamey and fnamey for bus injections and line flows"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef append(self, listname, xy_idx, var_name, element_name):\n self.resize()\n string = '{0} {1}'\n if listname not in ['unamex', 'unamey', 'fnamex', 'fnamey']:\n logger.error('Wrong list name for varname.')\n return\n elif listname in ['fnamex', 'fnamey']:\n string = '${0}\\\\ {1}$'\n\n if isinstance(element_name, list):\n for i, j in zip(xy_idx, element_name):\n # manual elem_add LaTex space for auto-generated element name\n if listname == 'fnamex' or listname == 'fnamey':\n j = j.replace(' ', '\\\\ ')\n self.__dict__[listname][i] = string.format(var_name, j)\n elif isinstance(element_name, int):\n self.__dict__[listname][xy_idx] = string.format(\n var_name, element_name)\n else:\n logger.warning(\n 'Unknown element_name type while building varname')", "response": "Append variable names to the name lists"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nappending bus injection and line flow names to varName", "response": "def bus_line_names(self):\n \"\"\"Append bus injection and line flow names to `varname`\"\"\"\n if self.system.tds.config.compute_flows:\n 
self.system.Bus._varname_inj()\n self.system.Line._varname_flow()\n self.system.Area._varname_inter()"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_xy_name(self, yidx, xidx=0):\n assert isinstance(xidx, int)\n if isinstance(yidx, int):\n yidx = [yidx]\n\n uname = ['Time [s]'] + self.uname\n fname = ['$Time\\\\ [s]$'] + self.fname\n\n xname = [list(), list()]\n yname = [list(), list()]\n\n xname[0] = uname[xidx]\n xname[1] = fname[xidx]\n\n yname[0] = [uname[i] for i in yidx]\n yname[1] = [fname[i] for i in yidx]\n\n return xname, yname", "response": "Return variable names for the given indices."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns a dictionary containing all known services.", "response": "def get_known_services():\n \"\"\"Return a dictionary of all known services.\n :return: A dictionary containing entries { service name : service class }\n\n Future: This will change to a dictionary containing references to\n factories: { service name : service class factory }\n A factory is a function that takes no arguments and returns an\n uninstantiated service class. 
This will avoid importing all\n service classes.\n \"\"\"\n if not hasattr(get_known_services, \"cache\"):\n setattr(\n get_known_services,\n \"cache\",\n {\n e.name: e.load()\n for e in pkg_resources.iter_entry_points(\"workflows.services\")\n },\n )\n register = get_known_services.cache.copy()\n return register"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncommand line input parser", "response": "def cli_parse():\n \"\"\"command line input parser\"\"\"\n parser = ArgumentParser(prog='andesplot')\n parser.add_argument('datfile', nargs=1, default=[], help='dat file name.')\n parser.add_argument('x', nargs=1, type=int, help='x axis variable index')\n parser.add_argument('y', nargs='*', help='y axis variable index')\n parser.add_argument('--xmax', type=float, help='x axis maximum value')\n parser.add_argument('--ymax', type=float, help='y axis maximum value')\n parser.add_argument('--ymin', type=float, help='y axis minimum value')\n parser.add_argument('--xmin', type=float, help='x axis minimum value')\n parser.add_argument(\n '--checkinit', action='store_true', help='check initialization value')\n parser.add_argument(\n '-x', '--xlabel', type=str, help='manual set x-axis text label')\n parser.add_argument('-y', '--ylabel', type=str, help='y-axis text label')\n parser.add_argument(\n '-s', '--save', action='store_true', help='save to file')\n parser.add_argument('-g', '--grid', action='store_true', help='grid on')\n parser.add_argument(\n '-d',\n '--no_latex',\n action='store_true',\n help='disable LaTex formatting')\n parser.add_argument(\n '-u',\n '--unattended',\n action='store_true',\n help='do not show the plot window')\n parser.add_argument('--ytimes', type=str, help='y times')\n parser.add_argument(\n '--dpi', type=int, help='image resolution in dot per inch (DPI)')\n args = parser.parse_args()\n return vars(args)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef 
add_plot(x, y, xl, yl, fig, ax, LATEX=False, linestyle=None, **kwargs):\n if LATEX:\n xl_data = xl[1] # NOQA\n yl_data = yl[1]\n else:\n xl_data = xl[0] # NOQA\n yl_data = yl[0]\n\n for idx in range(len(y)):\n ax.plot(x, y[idx], label=yl_data[idx], linestyle=linestyle)\n\n ax.legend(loc='upper right')\n ax.set_ylim(auto=True)", "response": "Add plots to an existing plot"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef check_init(yval, yl):\n suspect = []\n for var, label in zip(yval, yl):\n if abs(var[0] - var[-1]) >= 1e-6:\n suspect.append(label)\n if suspect:\n print('Initialization failure:')\n print(', '.join(suspect))\n else:\n print('Initialization is correct.')", "response": "Check initialization by comparing t = 0 and t = end values"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef load_lst(self):\n with open(self._lst_file, 'r') as fd:\n lines = fd.readlines()\n\n idx, uname, fname = list(), list(), list()\n\n for line in lines:\n values = line.split(',')\n values = [x.strip() for x in values]\n\n # preserve the idx ordering here in case variables are not\n # ordered by idx\n idx.append(int(values[0])) # convert to integer\n uname.append(values[1])\n fname.append(values[2])\n\n self._idx = idx\n self._fname = fname\n self._uname = uname", "response": "Load the lst file into internal data structures"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn variable names and indices matching query", "response": "def find_var(self, query, formatted=False):\n \"\"\"\n Return variable names and indices matching ``query``\n \"\"\"\n\n # load the variable list to search in\n names = self._uname if formatted is False else self._fname\n\n found_idx, found_names = list(), list()\n\n for idx, name in zip(self._idx, names):\n if re.search(query, name):\n found_idx.append(idx)\n 
found_names.append(name)\n\n return found_idx, found_names"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nload the dat file into internal data structures.", "response": "def load_dat(self, delimiter=','):\n \"\"\"\n Load the dat file into internal data structures, ``self._data``\n \"\"\"\n try:\n data = np.loadtxt(self._dat_file, delimiter=',')\n except ValueError:\n data = np.loadtxt(self._dat_file)\n\n self._data = data"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_values(self, idx):\n if isinstance(idx, list):\n idx = np.array(idx, dtype=int)\n\n return self._data[:, idx]", "response": "Return the variable values at the given indices"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_header(self, idx, formatted=False):\n header = self._uname if not formatted else self._fname\n return [header[x] for x in idx]", "response": "Return a list of the variable names at the given indices"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef export_csv(self, path, idx=None, header=None, formatted=False,\n sort_idx=True, fmt='%.18e'):\n \"\"\"\n Export to a csv file\n\n Parameters\n ----------\n path : str\n path of the csv file to save\n idx : None or array-like, optional\n the indices of the variables to export. Export all by default\n header : None or array-like, optional\n customized header if not `None`. Use the names from the lst file\n by default\n formatted : bool, optional\n Use LaTeX-formatted header. 
Does not apply when using customized\n header\n sort_idx : bool, optional\n Sort by idx or not, # TODO: implement sort\n fmt : str\n cell formatter\n \"\"\"\n if not idx:\n idx = self._idx\n if not header:\n header = self.get_header(idx, formatted=formatted)\n\n assert len(idx) == len(header), \\\n \"Idx length does not match header length\"\n\n body = self.get_values(idx)\n\n with open(path, 'w') as fd:\n fd.write(','.join(header) + '\\n')\n np.savetxt(fd, body, fmt=fmt, delimiter=',')", "response": "Exports the current state of the object to a csv file."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef auto_style(self):\n if self._row_size is None:\n return\n elif self._row_size == 3:\n self.set_cols_align(['l', 'l', 'l'])\n self.set_cols_valign(['t', 't', 't'])\n self.set_cols_width([12, 54, 12])\n elif self._row_size == 4:\n self.set_cols_align(['l', 'l', 'l', 'l'])\n self.set_cols_valign(['t', 't', 't', 't'])\n self.set_cols_width([10, 40, 10, 10])", "response": "automatic styling according to _row_size 76 characters in a row"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ngenerate texttable formatted string", "response": "def draw(self):\n \"\"\"generate texttable formatted string\"\"\"\n self.guess_header()\n self.add_left_space(\n )\n # for Texttable, elem_add a column of whitespace on the left for\n # better visual effect\n if self._title and self._descr:\n pre = self._title + '\\n' + self._descr + '\\n\\n'\n elif self._title:\n pre = self._title + '\\n\\n'\n elif self._descr:\n pre = 'Empty Title' + '\\n' + self._descr + '\\n'\n else:\n pre = ''\n empty_line = '\\n\\n'\n return pre + str(Texttable.draw(self)) + empty_line"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef guess_width(self):\n if len(self.header) <= 4:\n nspace = 6\n elif len(self.header) <= 6:\n nspace = 5\n else:\n nspace = 4\n ncol = len(self.header)\n 
self._width = [nspace] * ncol\n width = [0] * ncol\n\n # set initial width from header\n for idx, item in enumerate(self.header):\n width[idx] = len(str(item))\n\n # guess width of each column from first 10 lines of data\n samples = min(len(self.data), 10)\n for col in range(ncol):\n for idx in range(samples):\n data = self.data[idx][col]\n if not isinstance(data, (float, int)):\n temp = len(data)\n else:\n temp = 10\n if temp > width[col]:\n width[col] = temp\n\n for col in range(ncol):\n self._width[col] += width[col]", "response": "auto fit column width"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nsubscribing to a queue.", "response": "def subscribe(self, channel, callback, **kwargs):\n \"\"\"Listen to a queue, notify via callback function.\n :param channel: Queue name to subscribe to\n :param callback: Function to be called when messages are received.\n The callback will pass two arguments, the header as a\n dictionary structure, and the message.\n :param **kwargs: Further parameters for the transport layer. 
For example\n disable_mangling: Receive messages as unprocessed strings.\n exclusive: Attempt to become exclusive subscriber to the queue.\n acknowledgement: If true receipt of each message needs to be\n acknowledged.\n :return: A unique subscription ID\n \"\"\"\n self.__subscription_id += 1\n\n def mangled_callback(header, message):\n return callback(header, self._mangle_for_receiving(message))\n\n if \"disable_mangling\" in kwargs:\n if kwargs[\"disable_mangling\"]:\n mangled_callback = callback\n del kwargs[\"disable_mangling\"]\n self.__subscriptions[self.__subscription_id] = {\n \"channel\": channel,\n \"callback\": mangled_callback,\n \"ack\": kwargs.get(\"acknowledgement\"),\n \"unsubscribed\": False,\n }\n self.log.debug(\"Subscribing to %s with ID %d\", channel, self.__subscription_id)\n self._subscribe(self.__subscription_id, channel, mangled_callback, **kwargs)\n return self.__subscription_id"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nstop listening to a queue or broadcast.", "response": "def unsubscribe(self, subscription, drop_callback_reference=False, **kwargs):\n \"\"\"Stop listening to a queue or a broadcast\n :param subscription: Subscription ID to cancel\n :param drop_callback_reference: Drop the reference to the registered\n callback function immediately. 
This\n means any buffered messages still in\n flight will not arrive at the intended\n destination and cause exceptions to be\n raised instead.\n :param **kwargs: Further parameters for the transport layer.\n \"\"\"\n if subscription not in self.__subscriptions:\n raise workflows.Error(\"Attempting to unsubscribe unknown subscription\")\n if self.__subscriptions[subscription][\"unsubscribed\"]:\n raise workflows.Error(\n \"Attempting to unsubscribe already unsubscribed subscription\"\n )\n self._unsubscribe(subscription, **kwargs)\n self.__subscriptions[subscription][\"unsubscribed\"] = True\n if drop_callback_reference:\n self.drop_callback_reference(subscription)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ndrops reference to the callback function after subscribing.", "response": "def drop_callback_reference(self, subscription):\n \"\"\"Drop reference to the callback function after unsubscribing.\n Any future messages arriving for that subscription will result in\n exceptions being raised.\n :param subscription: Subscription ID to delete callback reference for.\n \"\"\"\n if subscription not in self.__subscriptions:\n raise workflows.Error(\n \"Attempting to drop callback reference for unknown subscription\"\n )\n if not self.__subscriptions[subscription][\"unsubscribed\"]:\n raise workflows.Error(\n \"Attempting to drop callback reference for live subscription\"\n )\n del self.__subscriptions[subscription]"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef subscribe_broadcast(self, channel, callback, **kwargs):\n self.__subscription_id += 1\n\n def mangled_callback(header, message):\n return callback(header, self._mangle_for_receiving(message))\n\n if \"disable_mangling\" in kwargs:\n if kwargs[\"disable_mangling\"]:\n mangled_callback = callback\n del kwargs[\"disable_mangling\"]\n self.__subscriptions[self.__subscription_id] = {\n \"channel\": channel,\n 
\"callback\": mangled_callback,\n \"ack\": False,\n \"unsubscribed\": False,\n }\n self.log.debug(\n \"Subscribing to broadcasts on %s with ID %d\",\n channel,\n self.__subscription_id,\n )\n self._subscribe_broadcast(\n self.__subscription_id, channel, mangled_callback, **kwargs\n )\n return self.__subscription_id", "response": "Subscribe to a topic and notify the callback function."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef subscription_callback(self, subscription):\n subscription_record = self.__subscriptions.get(subscription)\n if not subscription_record:\n raise workflows.Error(\"Attempting to callback on unknown subscription\")\n callback = subscription_record[\"callback\"]\n if self.__callback_interceptor:\n return self.__callback_interceptor(callback)\n return callback", "response": "Retrieve the callback function for a given subscription. Raise a\nException if the subscription does not exist."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef send(self, destination, message, **kwargs):\n message = self._mangle_for_sending(message)\n self._send(destination, message, **kwargs)", "response": "Send a message to a queue."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nsends a message to a queue.", "response": "def raw_send(self, destination, message, **kwargs):\n \"\"\"Send a raw (unmangled) message to a queue.\n This may cause errors if the receiver expects a mangled message.\n :param destination: Queue name to send to\n :param message: Either a string or a serializable object to be sent\n :param **kwargs: Further parameters for the transport layer. 
For example\n delay: Delay transport of message by this many seconds\n headers: Optional dictionary of header entries\n expiration: Optional expiration time, relative to sending time\n transaction: Transaction ID if message should be part of a\n transaction\n \"\"\"\n self._send(destination, message, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef broadcast(self, destination, message, **kwargs):\n message = self._mangle_for_sending(message)\n self._broadcast(destination, message, **kwargs)", "response": "Broadcast a message to a topic."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef raw_broadcast(self, destination, message, **kwargs):\n self._broadcast(destination, message, **kwargs)", "response": "Broadcast a message to a topic."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef ack(self, message, subscription_id=None, **kwargs):\n if isinstance(message, dict):\n message_id = message.get(\"message-id\")\n if not subscription_id:\n subscription_id = message.get(\"subscription\")\n else:\n message_id = message\n if not message_id:\n raise workflows.Error(\"Cannot acknowledge message without \" + \"message ID\")\n if not subscription_id:\n raise workflows.Error(\n \"Cannot acknowledge message without \" + \"subscription ID\"\n )\n self.log.debug(\n \"Acknowledging message %s on subscription %s\", message_id, subscription_id\n )\n self._ack(message_id, subscription_id, **kwargs)", "response": "Acknowledge receipt of a message."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef nack(self, message, subscription_id=None, **kwargs):\n if isinstance(message, dict):\n message_id = message.get(\"message-id\")\n if not subscription_id:\n subscription_id = message.get(\"subscription\")\n else:\n message_id = message\n if 
not message_id:\n raise workflows.Error(\"Cannot reject message without \" + \"message ID\")\n if not subscription_id:\n raise workflows.Error(\"Cannot reject message without \" + \"subscription ID\")\n self.log.debug(\n \"Rejecting message %s on subscription %s\", message_id, subscription_id\n )\n self._nack(message_id, subscription_id, **kwargs)", "response": "Reject receipt of a message."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef transaction_begin(self, **kwargs):\n self.__transaction_id += 1\n self.__transactions.add(self.__transaction_id)\n self.log.debug(\"Starting transaction with ID %d\", self.__subscription_id)\n self._transaction_begin(self.__transaction_id, **kwargs)\n return self.__transaction_id", "response": "Begin a new transaction."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\naborts a transaction and roll back all operations.", "response": "def transaction_abort(self, transaction_id, **kwargs):\n \"\"\"Abort a transaction and roll back all operations.\n :param transaction_id: ID of transaction to be aborted.\n :param **kwargs: Further parameters for the transport layer.\n \"\"\"\n if transaction_id not in self.__transactions:\n raise workflows.Error(\"Attempting to abort unknown transaction\")\n self.log.debug(\"Aborting transaction %s\", transaction_id)\n self.__transactions.remove(transaction_id)\n self._transaction_abort(transaction_id, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef transaction_commit(self, transaction_id, **kwargs):\n if transaction_id not in self.__transactions:\n raise workflows.Error(\"Attempting to commit unknown transaction\")\n self.log.debug(\"Committing transaction %s\", transaction_id)\n self.__transactions.remove(transaction_id)\n self._transaction_commit(transaction_id, **kwargs)", "response": "Commit a transaction.\n 
:param transaction_id: ID of transaction to be committed.\n :param **kwargs: Further parameters for the transport layer."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreading a MATPOWER data file into mpc and build andes device elements", "response": "def read(file, system):\n \"\"\"Read a MATPOWER data file into mpc and build andes device elements\"\"\"\n func = re.compile('function\\\\s')\n mva = re.compile('\\\\s*mpc.baseMVA\\\\s*=\\\\s*')\n bus = re.compile('\\\\s*mpc.bus\\\\s*=\\\\s*\\\\[')\n gen = re.compile('\\\\s*mpc.gen\\\\s*=\\\\s*\\\\[')\n branch = re.compile('\\\\s*mpc.branch\\\\s*=\\\\s*\\\\[')\n area = re.compile('\\\\s*mpc.areas\\\\s*=\\\\s*\\\\[')\n gencost = re.compile('\\\\s*mpc.gencost\\\\s*=\\\\s*\\\\[')\n bus_name = re.compile('\\\\s*mpc.bus_name\\\\s*=\\\\s*\\\\{')\n end = re.compile('\\\\s*\\\\];?')\n hasdigit = re.compile('.*\\\\d+\\\\s*]?;?')\n comment = re.compile('\\\\s*%.*')\n\n retval = True\n field = None\n info = True\n\n basemva = 100\n mpc = {\n 'bus': [],\n 'gen': [],\n 'branch': [],\n 'area': [],\n 'gencost': [],\n 'bus_name': [],\n }\n\n fid = open(file, 'r')\n\n for line in fid:\n line = line.strip().rstrip(';')\n if not line:\n continue\n elif func.search(line): # skip function declaration\n continue\n elif comment.search(line): # for comment lines\n if info:\n logger.info(line[1:72])\n info = False\n else:\n continue\n elif mva.search(line):\n basemva = float(line.split('=')[1])\n\n if not field:\n if bus.search(line):\n field = 'bus'\n elif gen.search(line):\n field = 'gen'\n elif branch.search(line):\n field = 'branch'\n elif area.search(line):\n field = 'area'\n elif gencost.search(line):\n field = 'gencost'\n elif bus_name.search(line):\n field = 'bus_name'\n else:\n continue\n elif end.search(line):\n field = None\n continue\n\n # parse mpc sections\n if field:\n if line.find('=') >= 0:\n line = line.split('=')[1]\n if line.find('[') >= 0:\n line = re.sub(r'\\[', '', line)\n elif 
line.find('{') >= 0:\n line = re.sub(r'{', '', line)\n\n if line.find('\\'') >= 0: # bus_name\n line = line.split(';')\n data = [i.strip('\\'').strip() for i in line]\n mpc['bus_name'].extend(data)\n else:\n if not hasdigit.search(line):\n continue\n line = line.split(';')\n for item in line:\n data = [float(val) for val in item.split()]\n mpc[field].append(data)\n\n fid.close()\n\n # add model elements to system\n sw = []\n\n system.mva = basemva\n\n for data in mpc['bus']:\n # idx ty pd qd gs bs area vmag vang baseKV zone vmax vmin\n # 0 1 2 3 4 5 6 7 8 9 10 11 12\n idx = int(data[0])\n ty = data[1]\n if ty == 3:\n sw.append(idx)\n pd = data[2] / basemva\n qd = data[3] / basemva\n gs = data[4] / basemva\n bs = data[5] / basemva\n area = data[6]\n vmag = data[7]\n vang = data[8] * deg2rad\n baseKV = data[9]\n zone = data[10]\n vmax = data[11]\n vmin = data[12]\n\n try:\n system.Bus.elem_add(\n idx=idx,\n name='Bus ' + str(idx),\n Vn=baseKV,\n voltage=vmag,\n angle=vang,\n vmax=vmax,\n vmin=vmin,\n area=area,\n region=zone)\n if pd or qd:\n system.PQ.elem_add(\n bus=idx, name='PQ ' + str(idx), Vn=baseKV, p=pd, q=qd)\n if gs or bs:\n system.Shunt.elem_add(\n bus=idx, name='Shunt ' + str(idx), Vn=baseKV, g=gs, b=bs)\n except KeyError:\n logger.error('Error adding to powersystem object.')\n retval = False\n\n gen_idx = 0\n for data in mpc['gen']:\n # bus pg qg qmax qmin vg mbase status pmax pmin pc1 pc2\n # 0 1 2 3 4 5 6 7 8 9 10 11\n # qc1min qc1max qc2min qc2max ramp_agc ramp_10 ramp_30 ramp_q\n # 12 13 14 15 16 17 18 19\n # apf\n # 20\n\n bus_idx = int(data[0])\n gen_idx += 1\n vg = data[5]\n status = int(data[7])\n mbase = basemva\n pg = data[1] / mbase\n qg = data[2] / mbase\n qmax = data[3] / mbase\n qmin = data[4] / mbase\n pmax = data[8] / mbase\n pmin = data[9] / mbase\n\n try:\n vn = system.Bus.Vn[system.Bus.uid[bus_idx]]\n if bus_idx in sw:\n system.SW.elem_add(\n idx=gen_idx,\n bus=bus_idx,\n busr=bus_idx,\n name='SW ' + str(bus_idx),\n u=status,\n 
Vn=vn,\n v0=vg,\n pg=pg,\n qg=qg,\n pmax=pmax,\n pmin=pmin,\n qmax=qmax,\n qmin=qmin,\n a0=0.0)\n else:\n system.PV.elem_add(\n idx=gen_idx,\n bus=bus_idx,\n busr=bus_idx,\n name='PV ' + str(bus_idx),\n u=status,\n Vn=vn,\n v0=vg,\n pg=pg,\n qg=qg,\n pmax=pmax,\n pmin=pmin,\n qmax=qmax,\n qmin=qmin)\n except KeyError:\n logger.error(\n 'Error adding or to powersystem object.')\n retval = False\n\n for data in mpc['branch']:\n # fbus\ttbus\tr\tx\tb\trateA\trateB\trateC\tratio\tangle\n # 0 1 2 3 4 5 6 7 8 9\n # status\tangmin\tangmax\tPf\tQf\tPt\tQt\n # 10 11 12 13 14 15 16\n fbus = data[0]\n tbus = data[1]\n r = data[2]\n x = data[3]\n b = data[4]\n status = int(data[10])\n\n if data[8] == 0.0: # not a transformer\n tf = False\n ratio = 1\n angle = 0\n elif data[8] == 1.0 and data[9] == 0.0: # not a transformer\n tf = False\n ratio = 1\n angle = 0\n else:\n tf = True\n ratio = data[8]\n angle = data[9]\n try:\n vf = system.Bus.Vn[system.Bus.uid[fbus]]\n vt = system.Bus.Vn[system.Bus.uid[tbus]]\n system.Line.elem_add(\n Vn=vf,\n Vn2=vt,\n bus1=fbus,\n bus2=tbus,\n r=r,\n x=x,\n b=b,\n u=status,\n trasf=tf,\n tap=ratio,\n phi=angle)\n except KeyError:\n logger.error('Error adding to powersystem object.')\n retval = False\n if len(mpc['bus_name']) == len(system.Bus.name):\n system.Bus.name[:] = mpc['bus_name']\n\n return retval"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nadd self into full Jacobian Y", "response": "def full_y(self, Y):\n \"\"\"Add self(shunt) into full Jacobian Y\"\"\"\n if not self.n:\n return\n Ysh = matrix(self.g,\n (self.n, 1), 'd') + 1j * matrix(self.b, (self.n, 1), 'd')\n uYsh = mul(self.u, Ysh)\n Y += spmatrix(uYsh, self.a, self.a, Y.size, 'z')"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nimporting and instantiates this JIT object", "response": "def jit_load(self):\n \"\"\"\n Import and instantiate this JIT object\n\n Returns\n -------\n\n \"\"\"\n try:\n model = 
importlib.import_module('.' + self.model, 'andes.models')\n device = getattr(model, self.device)\n self.system.__dict__[self.name] = device(self.system, self.name)\n\n g = self.system.__dict__[self.name]._group\n self.system.group_add(g)\n self.system.__dict__[g].register_model(self.name)\n\n # register device after loading\n self.system.devman.register_device(self.name)\n self.loaded = 1\n logger.debug('Imported model <{:s}.{:s}>.'.format(\n self.model, self.device))\n except ImportError:\n logger.error(\n 'non-JIT model <{:s}.{:s}> import error'\n .format(self.model, self.device))\n except AttributeError:\n logger.error(\n 'model <{:s}.{:s}> not exist. Check models/__init__.py'\n .format(self.model, self.device))"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\noverloading elem_add function of a JIT class", "response": "def elem_add(self, idx=None, name=None, **kwargs):\n \"\"\"overloading elem_add function of a JIT class\"\"\"\n self.jit_load()\n if self.loaded:\n return self.system.__dict__[self.name].elem_add(\n idx, name, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef initializing(self):\n self.subid = self._transport.subscribe(\n \"transient.transaction\", self.receive_message, acknowledgement=True\n )", "response": "Subscribe to a channel. Received messages must be acknowledged."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreceiving a message from the broker.", "response": "def receive_message(self, header, message):\n \"\"\"Receive a message\"\"\"\n\n print(\"=== Receive ===\")\n print(header)\n print(message)\n\n print(\"MsgID: {0}\".format(header[\"message-id\"]))\n assert header[\"message-id\"]\n\n txn = self._transport.transaction_begin()\n print(\" 1. 
Txn: {0}\".format(str(txn)))\n if self.crashpoint():\n self._transport.transaction_abort(txn)\n print(\"--- Abort ---\")\n return\n\n self._transport.ack(header[\"message-id\"], self.subid, transaction=txn)\n print(\" 2. Ack\")\n if self.crashpoint():\n self._transport.transaction_abort(txn)\n print(\"--- Abort ---\")\n return\n\n self._transport.send(\"transient.destination\", message, transaction=txn)\n print(\" 3. Send\")\n\n if self.crashpoint():\n self._transport.transaction_abort(txn)\n print(\"--- Abort ---\")\n return\n\n self._transport.transaction_commit(txn)\n print(\" 4. Commit\")\n print(\"=== Done ===\")"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating and send a unique message for this service.", "response": "def create_message(self):\n \"\"\"Create and send a unique message for this service.\"\"\"\n self.counter += 1\n self._transport.send(\n \"transient.transaction\",\n \"TXMessage #%d\\n++++++++Produced@ %f\"\n % (self.counter, (time.time() % 1000) * 1000),\n )\n self.log.info(\"Created message %d\", self.counter)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef elapsed(t0=0.0):\n now = time()\n dt = now - t0\n dt_sec = Decimal(str(dt)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)\n if dt_sec <= 1:\n dt_str = str(dt_sec) + ' second'\n else:\n dt_str = str(dt_sec) + ' seconds'\n return now, dt_str", "response": "get elapsed time from the give time\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_flag(self, value, flag, reset_val=False):\n if not self.__dict__[flag]:\n self.__dict__[flag] = matrix(1.0, (len(self.__dict__[value]), 1),\n 'd')\n for idx, item in enumerate(self.__dict__[value]):\n if item == 0:\n self.__dict__[flag][idx] = 0\n if reset_val:\n self.__dict__[value][idx] = 1", "response": "Set a flag to 0 if the corresponding value is 0"} {"SOURCE": 
"codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _update_summary(self, system):\n self.basic.update({\n 'nbus': system.Bus.n,\n 'ngen': system.PV.n + system.SW.n,\n 'ngen_on': sum(system.PV.u) + sum(system.SW.u),\n 'nload': system.PQ.n,\n 'nshunt': system.Shunt.n,\n 'nline': system.Line.n,\n 'ntransf': system.Line.trasf.count(True),\n 'narea': system.Area.n,\n })", "response": "Update the summary data for the current set of items."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate the extended data for the current version of the extended data.", "response": "def _update_extended(self, system):\n \"\"\"Update the extended data\"\"\"\n if self.system.pflow.solved is False:\n logger.warning(\n 'Cannot update extended summary. Power flow not solved.')\n return\n\n Sloss = sum(system.Line.S1 + system.Line.S2)\n self.extended.update({\n 'Ptot':\n sum(system.PV.pmax) + sum(system.SW.pmax), # + sum(system.SW.pmax)\n 'Pon':\n sum(mul(system.PV.u, system.PV.pmax)),\n 'Pg':\n sum(system.Bus.Pg),\n 'Qtot_min':\n sum(system.PV.qmin) + sum(system.SW.qmin),\n 'Qtot_max':\n sum(system.PV.qmax) + sum(system.SW.qmax),\n 'Qon_min':\n sum(mul(system.PV.u, system.PV.qmin)),\n 'Qon_max':\n sum(mul(system.PV.u, system.PV.qmax)),\n 'Qg':\n round(sum(system.Bus.Qg), 5),\n 'Pl':\n round(sum(system.PQ.p), 5),\n 'Ql':\n round(sum(system.PQ.q), 5),\n 'Psh':\n 0.0,\n 'Qsh':\n round(sum(system.PQ.q) - sum(system.Bus.Ql), 5),\n 'Ploss':\n round(Sloss.real, 5),\n 'Qloss':\n round(Sloss.imag, 5),\n 'Pch':\n round(sum(system.Line.Pchg1 + system.Line.Pchg2), 5),\n 'Qch':\n round(sum(system.Line.Qchg1 + system.Line.Qchg2), 5),\n })"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nupdates the internal state of the object based on the content of the object.", "response": "def update(self, content=None):\n \"\"\"\n Update values based on the requested content\n\n Parameters\n ----------\n 
content\n\n Returns\n -------\n\n \"\"\"\n if not content:\n return\n if content == 'summary' or 'extended' or 'powerflow':\n self._update_summary(self.system)\n if content == 'extended' or 'powerflow':\n self._update_extended(self.system)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nwriting a report to a file.", "response": "def write(self, content=None):\n \"\"\"\n Write report to file.\n\n Parameters\n ----------\n content: str\n 'summary', 'extended', 'powerflow'\n\n \"\"\"\n if self.system.files.no_output is True:\n return\n\n t, _ = elapsed()\n\n if not content:\n logger.warning('report content not specified.')\n return\n\n self.update(content)\n\n system = self.system\n file = system.files.output\n export = all_formats.get(system.config.export, 'txt')\n module = importlib.import_module('andes.formats.' + export)\n dump_data = getattr(module, 'dump_data')\n\n text = list()\n header = list()\n rowname = list()\n data = list()\n\n text.append(self.info)\n header.append(None)\n rowname.append(None)\n data.append(None)\n\n if content == 'summary' or 'extended' or 'powerflow':\n text.append(['SUMMARY:\\n'])\n header.append(None)\n rowname.append(self._basic_name)\n data.append([self.basic[item] for item in self._basic])\n\n if content == 'extended' or 'powerflow':\n text.append(['EXTENDED SUMMARY:\\n'])\n header.append(['P (pu)', 'Q (pu)'])\n rowname.append(\n ['Generation', 'Load', 'Shunt Inj', 'Losses', 'Line Charging'])\n Pcol = [\n self.extended['Pg'],\n self.extended['Pl'],\n self.extended['Psh'],\n self.extended['Ploss'],\n self.extended['Pch'],\n ]\n\n Qcol = [\n self.extended['Qg'],\n self.extended['Ql'],\n self.extended['Qsh'],\n self.extended['Qloss'],\n self.extended['Qch'],\n ]\n\n data.append([Pcol, Qcol])\n\n if content == 'powerflow' and system.pflow.solved:\n idx, name, Vm, Va, Pg, Qg, Pl, Ql = system.get_busdata()\n Va_unit = 'deg' if system.pflow.config.usedegree else 'rad'\n text.append(['BUS DATA:\\n'])\n # 
todo: consider system.pflow.config.units\n header.append([\n 'Vm(pu)', 'Va({:s})'.format(Va_unit), 'Pg (pu)', 'Qg (pu)',\n 'Pl (pu)', 'Ql (pu)'\n ])\n name = [str(i) + '-' + j[:8] for i, j in zip(idx, name)]\n rowname.append(name)\n data.append([Vm, Va, Pg, Qg, Pl, Ql])\n\n # Node data\n if hasattr(system, 'Node') and system.Node.n:\n idx, name, V = system.get_nodedata()\n text.append(['NODE DATA:\\n'])\n header.append(['V(pu)'])\n rowname.append(name)\n data.append([V])\n\n # Line data\n name, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss = \\\n system.get_linedata()\n text.append(['LINE DATA:\\n'])\n header.append([\n 'From Bus', 'To Bus', 'P From (pu)', 'Q From (pu)',\n 'P To (pu)', 'Q To(pu)', 'P Loss(pu)', 'Q Loss(pu)'\n ])\n rowname.append(name)\n data.append([fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss])\n\n # Additional Algebraic data\n text.append(['OTHER ALGEBRAIC VARIABLES:\\n'])\n header.append([''])\n rowname.append(\n system.varname.unamey[2 * system.Bus.n:system.dae.m])\n data.append([round(i, 5) for i in system.dae.y[2 * system.Bus.n:]])\n\n # Additional State variable data\n if system.dae.n:\n text.append(['OTHER STATE VARIABLES:\\n'])\n header.append([''])\n rowname.append(system.varname.unamex[:])\n data.append([round(i, 5) for i in system.dae.x[:]])\n\n dump_data(text, header, rowname, data, file)\n\n _, s = elapsed(t)\n logger.info('report written to <{:s}> in {:s}.'.format(system.files.output, s))"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef send(self, *args, **kwargs):\n if not self.transport:\n raise ValueError(\n \"This RecipeWrapper object does not contain \"\n \"a reference to a transport object.\"\n )\n\n if not self.recipe_step:\n raise ValueError(\n \"This RecipeWrapper object does not contain \"\n \"a recipe with a selected step.\"\n )\n\n if \"output\" not in self.recipe_step:\n # The current recipe step does not have output channels.\n return\n\n if isinstance(self.recipe_step[\"output\"], 
dict):\n # The current recipe step does have named output channels.\n if self.default_channel:\n # Use named output channel\n self.send_to(self.default_channel, *args, **kwargs)\n\n else:\n # The current recipe step does have unnamed output channels.\n self._send_to_destinations(self.recipe_step[\"output\"], *args, **kwargs)", "response": "Send messages to another service that is connected to the currently\n running service via the recipe."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ntriggering the start of a recipe.", "response": "def start(self, header=None, **kwargs):\n \"\"\"Trigger the start of a recipe, sending the defined payloads to the\n recipients set in the recipe. Any parameters to this function are\n passed to the transport send/broadcast methods.\n If the wrapped recipe has already been started then a ValueError will\n be raised.\n \"\"\"\n if not self.transport:\n raise ValueError(\n \"This RecipeWrapper object does not contain \"\n \"a reference to a transport object.\"\n )\n\n if self.recipe_step:\n raise ValueError(\"This recipe has already been started.\")\n\n for destination, payload in self.recipe[\"start\"]:\n self._send_to_destination(destination, header, payload, kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef checkpoint(self, message, header=None, delay=0, **kwargs):\n if not self.transport:\n raise ValueError(\n \"This RecipeWrapper object does not contain \"\n \"a reference to a transport object.\"\n )\n\n if not self.recipe_step:\n raise ValueError(\n \"This RecipeWrapper object does not contain \"\n \"a recipe with a selected step.\"\n )\n\n kwargs[\"delay\"] = delay\n\n self._send_to_destination(\n self.recipe_pointer, header, message, kwargs, add_path_step=False\n )", "response": "Send a message to the current recipe destination."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngenerating a full recipe 
message for the given destination.", "response": "def _generate_full_recipe_message(self, destination, message, add_path_step):\n \"\"\"Factory function to generate independent message objects for\n downstream recipients with different destinations.\"\"\"\n if add_path_step and self.recipe_pointer:\n recipe_path = self.recipe_path + [self.recipe_pointer]\n else:\n recipe_path = self.recipe_path\n\n return {\n \"environment\": self.environment,\n \"payload\": message,\n \"recipe\": self.recipe.recipe,\n \"recipe-path\": recipe_path,\n \"recipe-pointer\": destination,\n }"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nsend a message to a list of numbered destinations.", "response": "def _send_to_destinations(self, destinations, message, header=None, **kwargs):\n \"\"\"Send messages to a list of numbered destinations. This is an internal\n helper method used by the public 'send' methods.\n \"\"\"\n if not isinstance(destinations, list):\n destinations = (destinations,)\n for destination in destinations:\n self._send_to_destination(destination, header, message, kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nrun when a message event is received AttributeNames", "response": "def on_message(self, event):\n '''Runs when a message event is received\n\n Args:\n event: RTM API event.\n\n Returns:\n Legobot.messge\n '''\n\n metadata = self._parse_metadata(event)\n message = Message(text=metadata['text'],\n metadata=metadata).__dict__\n if message.get('text'):\n message['text'] = self.find_and_replace_userids(message['text'])\n message['text'] = self.find_and_replace_channel_refs(\n message['text']\n )\n return message"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nextends the run method of threading. 
Thread", "response": "def run(self):\n '''Extends the run() method of threading.Thread\n '''\n\n self.connect()\n while True:\n for event in self.slack_client.rtm_read():\n logger.debug(event)\n if 'type' in event and event['type'] in self.supported_events:\n event_type = event['type']\n dispatcher = self.supported_events[event_type]\n message = dispatcher(event)\n logger.debug(message)\n self.baseplate.tell(message)\n self.keepalive()\n time.sleep(0.1)\n return"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef find_and_replace_userids(self, text):\n '''Finds occurrences of Slack userids and attempts to replace them with\n display names.\n\n Args:\n text (string): The message text\n Returns:\n string: The message text with userids replaced.\n '''\n\n match = True\n pattern = re.compile('<@([A-Z0-9]{9})>')\n while match:\n match = pattern.search(text)\n if match:\n name = self.get_user_display_name(match.group(1))\n text = re.sub(re.compile(match.group(0)), '@' + name, text)\n\n return text", "response": "Finds occurrences of Slack userids and attempts to replace them with\n display names."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef find_and_replace_channel_refs(self, text):\n '''Find occurrences of Slack channel referenfces and attempts to\n replace them with just channel names.\n\n Args:\n text (string): The message text\n Returns:\n string: The message text with channel references replaced.\n '''\n\n match = True\n pattern = re.compile('<#([A-Z0-9]{9})\\|([A-Za-z0-9-]+)>')\n while match:\n match = pattern.search(text)\n if match:\n text = text.replace(match.group(0), '#' + match.group(2))\n\n return text", "response": "Find occurrences of Slack channel referenfces and attempts to\n replace them with just channel names."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ngrab all channels in the Slack 
team", "response": "def get_channels(self, condensed=False):\n '''Grabs all channels in the slack team\n\n Args:\n condensed (bool): if true triggers list condensing functionality\n\n Returns:\n dic: Dict of channels in Slack team.\n See also: https://api.slack.com/methods/channels.list\n '''\n\n channel_list = self.slack_client.api_call('channels.list')\n if not channel_list.get('ok'):\n return None\n\n if condensed:\n channels = [{'id': item.get('id'), 'name': item.get('name')}\n for item in channel_list.get('channels')]\n return channels\n else:\n return channel_list"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_users(self, condensed=False):\n '''Grabs all users in the slack team\n\n This should should only be used for getting list of all users. Do not\n use it for searching users. Use get_user_info instead.\n\n Args:\n condensed (bool): if true triggers list condensing functionality\n\n Returns:\n dict: Dict of users in Slack team.\n See also: https://api.slack.com/methods/users.list\n '''\n\n user_list = self.slack_client.api_call('users.list')\n if not user_list.get('ok'):\n return None\n if condensed:\n users = [{'id': item.get('id'), 'name': item.get('name'),\n 'display_name': item.get('profile').get('display_name')}\n for item in user_list.get('members')]\n return users\n else:\n return user_list", "response": "Grabs all users in the Slack team and returns a dict of users in Slack team."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ngives a Slack userid grabs user display_name from api. 
Returns a dict containing the api response", "response": "def get_user_display_name(self, userid):\n '''Given a Slack userid, grabs user display_name from api.\n\n Args:\n userid (string): the user id of the user being queried\n Returns:\n dict: a dictionary of the api response\n '''\n\n user_info = self.slack_client.api_call('users.info', user=userid)\n if user_info.get('ok'):\n user = user_info.get('user')\n if user.get('profile'):\n return user.get('profile').get('display_name')\n else:\n return user.get('name')\n else:\n return userid"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_dm_channel(self, userid):\n '''Perform a lookup of users to resolve a userid to a DM channel\n\n Args:\n userid (string): Slack userid to lookup.\n\n Returns:\n string: DM channel ID of user\n '''\n\n dm_open = self.slack_client.api_call('im.open', user=userid)\n return dm_open['channel']['id']", "response": "Perform a lookup of users to resolve a userid to a DM channel"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_username(self, userid):\n '''Perform a lookup of users to resolve a userid to a username\n\n Args:\n userid (string): Slack userid to lookup.\n\n Returns:\n string: Human-friendly name of the user\n '''\n\n username = self.user_map.get(userid)\n if not username:\n users = self.get_users()\n if users:\n members = {\n m['id']: m['name']\n for m in users.get('members', [{}])\n if m.get('id')\n and m.get('name')\n }\n if members:\n self.user_map.update(members)\n\n username = self.user_map.get(userid, userid)\n\n return username", "response": "Perform a lookup of users to resolve a userid to a username"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nperforming a lookup of bots. 
info to resolve a botid to a userid value", "response": "def get_userid_from_botid(self, botid):\n '''Perform a lookup of bots.info to resolve a botid to a userid\n\n Args:\n botid (string): Slack botid to lookup.\n Returns:\n string: userid value\n '''\n botinfo = self.slack_client.api_call('bots.info', bot=botid)\n if botinfo['ok'] is True:\n return botinfo['bot'].get('user_id')\n else:\n return botid"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _parse_metadata(self, message):\n '''Parse incoming messages to build metadata dict\n Lots of 'if' statements. It sucks, I know.\n\n Args:\n message (dict): JSON dump of message sent from Slack\n\n Returns:\n Legobot.Metadata\n '''\n\n # Try to handle all the fields of events we care about.\n metadata = Metadata(source=self.actor_urn).__dict__\n metadata['thread_ts'] = message.get('thread_ts')\n if 'presence' in message:\n metadata['presence'] = message['presence']\n\n if 'text' in message:\n metadata['text'] = message['text']\n elif 'previous_message' in message:\n # Try to handle slack links\n if 'text' in message['previous_message']:\n metadata['text'] = message['previous_message']['text']\n else:\n metadata['text'] = None\n else:\n metadata['text'] = None\n\n if 'user' in message:\n metadata['source_user'] = message['user']\n elif 'bot_id' in message:\n metadata['source_user'] = self.get_userid_from_botid(\n message['bot_id'])\n elif 'message' in message and 'user' in message['message']:\n metadata['source_user'] = message['message']['user']\n else:\n metadata['source_user'] = None\n\n metadata['user_id'] = metadata['source_user']\n metadata['display_name'] = self.get_username(metadata['source_user'])\n\n if 'channel' in message:\n metadata['source_channel'] = message['channel']\n # Slack starts DM channel IDs with \"D\"\n if message['channel'].startswith('D'):\n metadata['is_private_message'] = True\n else:\n metadata['is_private_message'] = 
False\n\n metadata['source_connector'] = 'slack'\n\n return metadata", "response": "Parse incoming messages to build metadata dict\nAttributeNames"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nbuilding a slack attachment.", "response": "def build_attachment(self, text, target, attachment, thread):\n '''Builds a slack attachment.\n\n Args:\n message (Legobot.Message): message w/ metadata to send.\n\n Returns:\n attachment (dict): attachment data.\n '''\n\n attachment = {\n 'as_user': True,\n 'text': text,\n 'channel': target,\n 'attachments': [\n {\n 'fallback': text,\n 'image_url': attachment\n }\n ]\n }\n if thread:\n attachment['thread_ts'] = thread\n return attachment"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef handle(self, message):\n '''Attempts to send a message to the specified destination in Slack.\n Extends Legobot.Lego.handle()\n\n Args:\n message (Legobot.Message): message w/ metadata to send.\n '''\n\n logger.debug(message)\n if Utilities.isNotEmpty(message['metadata']['opts']):\n target = message['metadata']['opts']['target']\n thread = message['metadata']['opts'].get('thread')\n # pattern = re.compile('@([a-zA-Z0-9._-]+)')\n pattern = re.compile('^@([a-zA-Z0-9._-]+)|\\s@([a-zA-Z0-9._-]+)')\n matches = re.findall(pattern, message['text'])\n matches = set(matches)\n logger.debug('MATCHES!!!! 
{}'.format(matches))\n for match in matches:\n if isinstance(match, tuple):\n if match[0] != '':\n match = match[0]\n else:\n match = match[1]\n if not match.startswith('@'):\n match = '@' + match\n message['text'] = message['text'].replace(\n match,\n '<{}>'.format(match)\n )\n\n pattern = re.compile('#([A-Za-z0-9-]+)')\n matches = re.findall(pattern, message['text'])\n matches = set(matches)\n for match in matches:\n channel_id = self.botThread.get_channel_id_by_name(match)\n if channel_id:\n message['text'] = message['text'].replace(\n '#' + match,\n '<#{}|{}>'.format(\n channel_id,\n match\n )\n )\n\n if (message['text'].find('<<@') != -1\n or message['text'].find('<<#') != -1):\n message['text'] = message['text'].replace('<<', '<')\n message['text'] = message['text'].replace('>>', '>')\n\n if target.startswith('U'):\n target = self.botThread.get_dm_channel(target)\n attachment = message['metadata']['opts'].get('attachment')\n if attachment:\n text = message['metadata']['opts'].get('fallback')\n attachment = self.build_attachment(\n text, target, attachment, thread)\n self.botThread.post_attachment(attachment)\n else:\n self.botThread.slack_client.rtm_send_message(\n target, message['text'], thread=thread)", "response": "Handles a message from the specified destination in Slack."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef guess(system):\n files = system.files\n maybe = []\n if files.input_format:\n maybe.append(files.input_format)\n # first, guess by extension\n for key, val in input_formats.items():\n if type(val) == list:\n for item in val:\n if files.ext.strip('.').lower() == item:\n maybe.append(key)\n else:\n if files.ext.strip('.').lower() == val:\n maybe.append(key)\n\n # second, guess by lines\n true_format = ''\n fid = open(files.case, 'r')\n for item in maybe:\n try:\n parser = importlib.import_module('.' 
+ item, __name__)\n testlines = getattr(parser, 'testlines')\n if testlines(fid):\n true_format = item\n break\n except ImportError:\n logger.debug(\n 'Parser for {:s} format is not found. '\n 'Format guess will continue.'.\n format(item))\n fid.close()\n\n if true_format:\n logger.debug('Input format guessed as {:s}.'.format(true_format))\n else:\n logger.error('Unable to determine case format.')\n\n files.input_format = true_format\n\n # guess addfile format\n if files.addfile:\n _, add_ext = os.path.splitext(files.addfile)\n for key, val in input_formats.items():\n if type(val) == list:\n if add_ext[1:] in val:\n files.add_format = key\n else:\n if add_ext[1:] == val:\n files.add_format = key\n\n return true_format", "response": "Guesses the input format of the current node."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef parse(system):\n\n t, _ = elapsed()\n\n input_format = system.files.input_format\n add_format = system.files.add_format\n # exit when no input format is given\n if not input_format:\n logger.error(\n 'No input format found. Specify or guess a format before parsing.')\n return False\n\n # exit if the format parser could not be imported\n try:\n parser = importlib.import_module('.' + input_format, __name__)\n dmparser = importlib.import_module('.' + 'dome', __name__)\n if add_format:\n addparser = importlib.import_module('.' + add_format, __name__)\n except ImportError:\n logger.error(\n 'Parser for {:s} format not found. 
Program will exit.'.format(\n input_format))\n return False\n\n # try parsing the base case file\n logger.info('Parsing input file <{:s}>'.format(system.files.fullname))\n\n if not parser.read(system.files.case, system):\n logger.error(\n 'Error parsing case file {:s} with {:s} format parser.'.format(\n system.files.fullname, input_format))\n return False\n\n # Try parsing the addfile\n if system.files.addfile:\n if not system.files.add_format:\n logger.error('Unknown addfile format.')\n return\n logger.info('Parsing additional file {:s}.'.format(\n system.files.addfile))\n if not addparser.readadd(system.files.addfile, system):\n logger.error(\n 'Error parsing addfile {:s} with {:s} format parser.'.format(\n system.files.addfile, input_format))\n return False\n\n # Try parsing the dynfile with dm filter\n if system.files.dynfile:\n logger.info('Parsing input file {:s}.'.format(\n system.files.dynfile))\n if not dmparser.read(system.files.dynfile, system):\n logger.error(\n 'Error parsing dynfile {:s} with dm format parser.'.format(\n system.files.dynfile))\n return False\n\n _, s = elapsed(t)\n logger.debug('Case file {:s} parsed in {:s}.'.format(\n system.files.fullname, s))\n\n return True", "response": "Parse the input file with the given format."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef calc_state_matrix(self):\n system = self.system\n\n Gyx = matrix(system.dae.Gx)\n self.solver.linsolve(system.dae.Gy, Gyx)\n\n self.As = matrix(system.dae.Fx - system.dae.Fy * Gyx)\n\n # ------------------------------------------------------\n # TODO: use scipy eigs\n # self.As = sparse(self.As)\n # I = np.array(self.As.I).reshape((-1,))\n # J = np.array(self.As.J).reshape((-1,))\n # V = np.array(self.As.V).reshape((-1,))\n # self.As = csr_matrix((V, (I, J)), shape=self.As.size)\n # ------------------------------------------------------\n return self.As", "response": "Calculates the state matrix and stores 
to self. As"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncalculate the eigenvalues of the state matrix.", "response": "def calc_eigvals(self):\n \"\"\"\n Solve eigenvalues of the state matrix ``self.As``\n\n Returns\n -------\n None\n \"\"\"\n self.eigs = numpy.linalg.eigvals(self.As)\n # TODO: use scipy.sparse.linalg.eigs(self.As)\n\n return self.eigs"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef calc_part_factor(self):\n mu, N = numpy.linalg.eig(self.As)\n # TODO: use scipy.sparse.linalg.eigs(self.As)\n\n N = matrix(N)\n n = len(mu)\n idx = range(n)\n\n W = matrix(spmatrix(1.0, idx, idx, (n, n), N.typecode))\n gesv(N, W)\n\n partfact = mul(abs(W.T), abs(N))\n\n b = matrix(1.0, (1, n))\n WN = b * partfact\n partfact = partfact.T\n\n for item in idx:\n mu_real = mu[item].real\n mu_imag = mu[item].imag\n mu[item] = complex(round(mu_real, 4), round(mu_imag, 4))\n partfact[item, :] /= WN[item]\n\n # participation factor\n self.mu = matrix(mu)\n self.part_fact = matrix(partfact)\n\n return self.mu, self.part_fact", "response": "Compute the participation factor of states in eigenvalues\n Returns ------- self. mu self. 
part_fact"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef dump_results(self):\n system = self.system\n mu = self.mu\n partfact = self.part_fact\n\n if system.files.no_output:\n return\n\n text = []\n header = []\n rowname = []\n data = []\n\n neig = len(mu)\n mu_real = mu.real()\n mu_imag = mu.imag()\n npositive = sum(1 for x in mu_real if x > 0)\n nzero = sum(1 for x in mu_real if x == 0)\n nnegative = sum(1 for x in mu_real if x < 0)\n\n numeral = []\n for idx, item in enumerate(range(neig)):\n if mu_real[idx] == 0:\n marker = '*'\n elif mu_real[idx] > 0:\n marker = '**'\n else:\n marker = ''\n numeral.append('#' + str(idx + 1) + marker)\n\n # compute frequency, undamped frequency and damping\n freq = [0] * neig\n ufreq = [0] * neig\n damping = [0] * neig\n for idx, item in enumerate(mu):\n if item.imag == 0:\n freq[idx] = 0\n ufreq[idx] = 0\n damping[idx] = 0\n else:\n freq[idx] = abs(item) / 2 / pi\n ufreq[idx] = abs(item.imag / 2 / pi)\n damping[idx] = -div(item.real, abs(item)) * 100\n\n # obtain most associated variables\n var_assoc = []\n for prow in range(neig):\n temp_row = partfact[prow, :]\n name_idx = list(temp_row).index(max(temp_row))\n var_assoc.append(system.varname.unamex[name_idx])\n\n pf = []\n for prow in range(neig):\n temp_row = []\n for pcol in range(neig):\n temp_row.append(round(partfact[prow, pcol], 5))\n pf.append(temp_row)\n\n text.append(system.report.info)\n header.append([''])\n rowname.append(['EIGENVALUE ANALYSIS REPORT'])\n data.append('')\n\n text.append('STATISTICS\\n')\n header.append([''])\n rowname.append(['Positives', 'Zeros', 'Negatives'])\n data.append([npositive, nzero, nnegative])\n\n text.append('EIGENVALUE DATA\\n')\n header.append([\n 'Most Associated', 'Real', 'Imag', 'Damped Freq.', 'Frequency',\n 'Damping [%]'\n ])\n rowname.append(numeral)\n data.append(\n [var_assoc,\n list(mu_real),\n list(mu_imag), ufreq, freq, damping])\n\n cpb = 7 # columns 
per block\n nblock = int(ceil(neig / cpb))\n\n if nblock <= 100:\n for idx in range(nblock):\n start = cpb * idx\n end = cpb * (idx + 1)\n text.append('PARTICIPATION FACTORS [{}/{}]\\n'.format(\n idx + 1, nblock))\n header.append(numeral[start:end])\n rowname.append(system.varname.unamex)\n data.append(pf[start:end])\n\n dump_data(text, header, rowname, data, system.files.eig)\n logger.info('report saved.')", "response": "Save eigenvalue analysis reports and return a list of strings."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn all the action times and times - 1e - 6 in a list", "response": "def get_times(self):\n \"\"\"Return all the action times and times-1e-6 in a list\"\"\"\n if not self.n:\n return []\n self.times = list(mul(self.u1, self.t1)) + \\\n list(mul(self.u2, self.t2)) + \\\n list(mul(self.u3, self.t3)) + \\\n list(mul(self.u4, self.t4))\n\n self.times = matrix(list(set(self.times)))\n self.times = list(self.times) + list(self.times - 1e-6)\n return self.times"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef send(self, ws, seq):\n\n payload = {'op': 1, 'd': seq}\n payload = json.dumps(payload)\n logger.debug(\"Sending heartbeat with payload {}\".format(payload))\n ws.send(payload)\n return", "response": "Sends a heartbeat message to the Discord\n websocket"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsends a message to a Discord channel or user", "response": "def create_message(self, channel_id, text):\n \"\"\"\n Sends a message to a Discord channel or user via REST API\n\n Args:\n channel_id (string): ID of destingation Discord channel\n text (string): Content of message\n \"\"\"\n\n baseurl = self.rest_baseurl + \\\n '/channels/{}/messages'.format(channel_id)\n requests.post(baseurl,\n headers=self.headers,\n data=json.dumps({'content': text}))"} {"SOURCE": "codesearchnet", "instruction": "How would you 
explain what the following Python 3 function does\ndef identify(self, token):\n\n payload = {\n 'op': 2,\n 'd': {\n 'token': self.token,\n 'properties': {\n '$os': sys.platform,\n '$browser': 'legobot',\n '$device': 'legobot'\n },\n 'compress': False,\n 'large_threshold': 250\n }\n }\n payload['d']['synced_guilds'] = []\n logger.info(\"Identifying with the following message: \\\n {}\".format(payload))\n self.ws.send(json.dumps(payload))\n return", "response": "Identifies the current user with the given token."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef on_hello(self, message):\n\n logger.info(\"Got a hello\")\n self.identify(self.token)\n self.heartbeat_thread = Heartbeat(self.ws,\n message['d']['heartbeat_interval'])\n self.heartbeat_thread.start()\n return", "response": "Runs on a hello event from Discord websocket connection"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef on_heartbeat(self, message):\n\n logger.info(\"Got a heartbeat\")\n logger.info(\"Heartbeat message: {}\".format(message))\n self.heartbeat_thread.update_sequence(message['d'])\n return", "response": "Runs on a heartbeat event from Discord websocket connection"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nruns on a create_message event from Discord websocket connection", "response": "def on_message(self, message):\n \"\"\"\n Runs on a create_message event from websocket connection\n\n Args:\n message (dict): Full message from Discord websocket connection\"\n \"\"\"\n\n if 'content' in message['d']:\n metadata = self._parse_metadata(message)\n message = Message(text=message['d']['content'],\n metadata=metadata).__dict__\n logger.debug(message)\n self.baseplate.tell(message)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _parse_metadata(self, message):\n\n metadata 
= Metadata(source=self.actor_urn).__dict__\n if 'author' in message['d']:\n metadata['source_user'] = message['d']['author']['username']\n else:\n metadata['source_user'] = None\n if 'channel_id' in message['d']:\n metadata['source_channel'] = message['d']['channel_id']\n else:\n metadata['source_channel'] = None\n metadata['user_id'] = metadata['source_user']\n metadata['display_name'] = metadata['source_user']\n\n metadata['source_connector'] = 'discord'\n\n return metadata", "response": "Parses the metadata from a Discord websocket connection"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ndispatch messages to appropriate handler based on opcode", "response": "def handle(self, message):\n \"\"\"\n Dispatches messages to appropriate handler based on opcode\n\n Args:\n message (dict): Full message from Discord websocket connection\n \"\"\"\n\n opcode = message['op']\n if opcode == 10:\n self.on_hello(message)\n elif opcode == 11:\n self.on_heartbeat(message)\n elif opcode == 0:\n self.on_message(message)\n else:\n logger.debug(\"Not a message we handle: OPCODE {}\".format(opcode))\n return"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef run(self):\n\n self.ws = self.connect()\n while True:\n try:\n data = json.loads(self.ws.recv())\n self.handle(data)\n except json.decoder.JSONDecodeError as e:\n logger.fatal(\"No data on socket...\")", "response": "Overrides run method of threading.Thread.\n Called by DiscoBot.start(), inherited from threading.Thread"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nhandle a message from the Legobot server.", "response": "def handle(self, message):\n '''\n Attempts to send a message to the specified destination in Discord.\n Extends Legobot.Lego.handle()\n\n Args:\n message (Legobot.Message): message w/ metadata to send.\n '''\n\n logger.debug(message)\n if 
Utilities.isNotEmpty(message['metadata']['opts']):\n target = message['metadata']['opts']['target']\n self.botThread.create_message(target, message['text'])"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef resize(self):\n yext = self.m - len(self.y)\n xext = self.n - len(self.x)\n if yext > 0:\n yzeros = zeros(yext, 1)\n yones = ones(yext, 1)\n self.y = matrix([self.y, yzeros], (self.m, 1), 'd')\n self.g = matrix([self.g, yzeros], (self.m, 1), 'd')\n self.uy = matrix([self.uy, yones], (self.m, 1), 'd')\n self.zymin = matrix([self.zymin, yones], (self.m, 1), 'd')\n self.zymax = matrix([self.zymax, yones], (self.m, 1), 'd')\n if xext > 0:\n xzeros = zeros(xext, 1)\n xones = ones(xext, 1)\n self.x = matrix([self.x, xzeros], (self.n, 1), 'd')\n self.f = matrix([self.f, xzeros], (self.n, 1), 'd')\n self.ux = matrix([self.ux, xones], (self.n, 1), 'd')\n self.zxmin = matrix([self.zxmin, xones], (self.n, 1), 'd')\n self.zxmax = matrix([self.zxmax, xones], (self.n, 1), 'd')", "response": "Resize dae and extend for init1 variables\n "} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting the hard limits for the algebraic variables and reset the equation mismatches .", "response": "def hard_limit(self, yidx, ymin, ymax, min_set=None, max_set=None):\n \"\"\"Set hard limits for algebraic variables and reset the equation mismatches\n\n :param yidx: algebraic variable indices\n :param ymin: lower limit to check for\n :param ymax: upper limit to check for\n :param min_set: optional lower limit to set (``ymin`` as default)\n :param max_set: optional upper limit to set (``ymax`` as default)\n\n :type yidx: list, matrix\n :type ymin: matrix, int, float, list\n :type ymax: matrix, int, float, list\n :type min_set: matrix\n :type max_set: matrix\n\n :return: None\n\n \"\"\"\n yidx = matrix(yidx)\n yval = self.y[yidx]\n ny = len(yidx)\n\n if isinstance(ymin, (int, float, list)):\n ymin = matrix(ymin, (ny, 1), 
'd')\n if isinstance(ymax, (int, float, list)):\n ymax = matrix(ymax, (ny, 1), 'd')\n\n if not min_set:\n min_set = ymin\n elif isinstance(min_set, (int, float, list)):\n min_set = matrix(min_set, (ny, 1), 'd')\n\n if not max_set:\n max_set = ymax\n elif isinstance(max_set, (int, float, list)):\n max_set = matrix(max_set, (ny, 1), 'd')\n\n above = ageb(yval, ymax)\n below = aleb(yval, ymin)\n\n above_idx = index(above, 1.0)\n below_idx = index(below, 1.0)\n\n above_yidx = yidx[above_idx]\n below_yidx = yidx[below_idx]\n idx = list(above_idx) + list(below_idx)\n\n if len(above_yidx) > 0:\n self.y[above_yidx] = max_set[above_idx]\n self.zymax[above_yidx] = 0\n\n if len(below_yidx) > 0:\n self.y[below_yidx] = min_set[below_idx]\n self.zymin[below_yidx] = 0\n\n if len(idx):\n self.g[yidx[idx]] = 0\n self.ac_reset = True"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef hard_limit_remote(self,\n yidx,\n ridx,\n rtype='y',\n rmin=None,\n rmax=None,\n min_yset=0,\n max_yset=0):\n \"\"\"Limit the output of yidx if the remote y is not within the limits\n\n This function needs to be modernized.\n \"\"\"\n ny = len(yidx)\n assert ny == len(\n ridx), \"Length of output vars and remote vars does not match\"\n assert rtype in ('x',\n 'y'), \"ridx must be either y (algeb) or x (state)\"\n\n if isinstance(min_yset, (int, float)):\n min_yset = matrix(min_yset, (ny, 1), 'd')\n if isinstance(max_yset, (int, float)):\n max_yset = matrix(max_yset, (ny, 1), 'd')\n\n above_idx, below_idx = list(), list()\n yidx = matrix(yidx)\n\n if rmax:\n # find the over-limit remote idx\n above = ageb(self.__dict__[rtype][ridx], rmax)\n above_idx = index(above, 1.0)\n # reset the y values based on the remote limit violations\n self.y[yidx[above_idx]] = max_yset[above_idx]\n self.zymax[yidx[above_idx]] = 0\n\n if rmin:\n below = aleb(self.__dict__[rtype][ridx], rmin)\n below_idx = index(below, 1.0)\n self.y[yidx[below_idx]] = 
min_yset[below_idx]\n self.zymin[yidx[below_idx]] = 0\n\n idx = above_idx + below_idx\n self.g[yidx[idx]] = 0\n\n if len(idx) > 0:\n self.factorize = True", "response": "hard limit the output of yidx if the remote y is not within the limits\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nresetting the dae. Ac sparse matrix for disabled equations.", "response": "def reset_Ac(self):\n \"\"\"\n Reset ``dae.Ac`` sparse matrix for disabled equations\n due to hard_limit and anti_windup limiters.\n\n :return: None\n \"\"\"\n if self.ac_reset is False:\n return\n\n mn = self.m + self.n\n\n x = index(aandb(self.zxmin, self.zxmax), 0.)\n y = [i + self.n for i in index(aandb(self.zymin, self.zymax), 0.)]\n xy = list(x) + y\n\n eye = spdiag([1.0] * mn)\n H = spmatrix(1.0, xy, xy, (mn, mn), 'd')\n\n # Modifying ``eye`` is more efficient than ``eye = eye - H``.\n # CVXOPT modifies eye in place because all the accessed elements exist.\n\n for idx in xy:\n eye[idx, idx] = 0\n\n if len(xy) > 0:\n self.Ac = eye * (self.Ac * eye) - H\n self.q[x] = 0\n\n self.ac_reset = False\n self.factorize = True"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the 2 - D size of a Jacobian matrix in tuple", "response": "def get_size(self, m):\n \"\"\"\n Return the 2-D size of a Jacobian matrix in tuple\n \"\"\"\n nrow, ncol = 0, 0\n if m[0] == 'F':\n nrow = self.n\n elif m[0] == 'G':\n nrow = self.m\n\n if m[1] == 'x':\n ncol = self.n\n elif m[1] == 'y':\n ncol = self.m\n\n return nrow, ncol"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nadding a vector of values to the Jacobian matrix m.", "response": "def add_jac(self, m, val, row, col):\n \"\"\"Add tuples (val, row, col) to the Jacobian matrix ``m``\n\n Implemented in numpy.arrays for temporary storage.\n \"\"\"\n assert m in ('Fx', 'Fy', 'Gx', 'Gy', 'Fx0', 'Fy0', 'Gx0', 'Gy0'), \\\n 'Wrong Jacobian matrix name <{0}>'.format(m)\n\n if 
isinstance(val, (int, float)):\n val = val * ones(len(row), 1)\n\n self._temp[m]['I'] = matrix([self._temp[m]['I'], matrix(row)])\n self._temp[m]['J'] = matrix([self._temp[m]['J'], matrix(col)])\n self._temp[m]['V'] = matrix([self._temp[m]['V'], matrix(val)])"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef temp_to_spmatrix(self, ty):\n assert ty in ('jac0', 'jac')\n\n jac0s = ['Fx0', 'Fy0', 'Gx0', 'Gy0']\n jacs = ['Fx', 'Fy', 'Gx', 'Gy']\n\n if ty == 'jac0':\n todo = jac0s\n elif ty == 'jac':\n todo = jacs\n\n for m in todo:\n self.__dict__[m] = spmatrix(self._temp[m]['V'],\n self._temp[m]['I'], self._temp[m]['J'],\n self.get_size(m), 'd')\n if ty == 'jac':\n self.__dict__[m] += self.__dict__[m + '0']\n\n self.apply_set(ty)", "response": "Convert Jacobian tuples to matrices"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nset the values at row col in Jacobian m to val.", "response": "def set_jac(self, m, val, row, col):\n \"\"\"\n Set the values at (row, col) to val in Jacobian m\n\n :param m: Jacobian name\n :param val: values to set\n :param row: row indices\n :param col: col indices\n :return: None\n \"\"\"\n assert m in ('Fx', 'Fy', 'Gx', 'Gy', 'Fx0', 'Fy0', 'Gx0', 'Gy0'), \\\n 'Wrong Jacobian matrix name <{0}>'.format(m)\n\n if isinstance(val,\n (int, float)) and isinstance(row,\n (np.ndarray, matrix, list)):\n val = val * ones(len(row), 1)\n\n self._set[m]['I'] = matrix([self._set[m]['I'], matrix(row)])\n self._set[m]['J'] = matrix([self._set[m]['J'], matrix(col)])\n self._set[m]['V'] = matrix([self._set[m]['V'], matrix(val)])"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\napplies Jacobian set values to matrices.", "response": "def apply_set(self, ty):\n \"\"\"\n Apply Jacobian set values to matrices\n\n :param ty: Jacobian type in ``('jac0', 'jac')``\n :return:\n \"\"\"\n assert ty in ('jac0', 'jac')\n\n if ty == 'jac0':\n todo 
= ['Fx0', 'Fy0', 'Gx0', 'Gy0']\n else:\n todo = ['Fx', 'Fy', 'Gx', 'Gy']\n\n for m in todo:\n for idx in range(len(self._set[m]['I'])):\n i = self._set[m]['I'][idx]\n j = self._set[m]['J'][idx]\n v = self._set[m]['V'][idx]\n self.__dict__[m][i, j] = v"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef show(self, eq, value=None):\n if eq in ['f', 'x']:\n key = 'unamex'\n elif eq in ['g', 'y']:\n key = 'unamey'\n\n if value:\n value = list(value)\n else:\n value = list(self.__dict__[eq])\n\n out = ''\n for name, val, idx in zip(self.system.varname.__dict__[key], value,\n range(len(value))):\n out += '{:20s} [{:>12.4f}] {:g}\\n'.format(name, val, idx)\n return out", "response": "Show the names of the variable in the system."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef find_val(self, eq, val):\n if eq not in ('f', 'g', 'q'):\n return\n elif eq in ('f', 'q'):\n key = 'unamex'\n elif eq == 'g':\n key = 'unamey'\n idx = 0\n for m, n in zip(self.system.varname.__dict__[key], self.__dict__[eq]):\n if n == val:\n return m, idx\n idx += 1\n return", "response": "Return the name of the equation having the given value"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nresetting numbers smaller than 1e - 12 in f and g equations", "response": "def reset_small(self, eq):\n \"\"\"Reset numbers smaller than 1e-12 in f and g equations\"\"\"\n assert eq in ('f', 'g')\n for idx, var in enumerate(self.__dict__[eq]):\n if abs(var) <= 1e-12:\n self.__dict__[eq][idx] = 0"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef check_diag(self, jac, name):\n system = self.system\n pos = []\n names = []\n pairs = ''\n size = jac.size\n diag = jac[0:size[0] ** 2:size[0] + 1]\n\n for idx in range(size[0]):\n if abs(diag[idx]) <= 1e-8:\n pos.append(idx)\n\n for idx in pos:\n 
names.append(system.varname.__dict__[name][idx])\n\n if len(names) > 0:\n for i, j in zip(pos, names):\n pairs += '{0}: {1}\\n'.format(i, j)\n logger.debug('Jacobian diagonal check:')\n logger.debug(pairs)", "response": "Check matrix jac for diagonal elements that equals 0"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn full file path filename line number function name and line contents of the last exception.", "response": "def get_exception_source():\n \"\"\"Returns full file path, file name, line number, function name, and line contents\n causing the last exception.\"\"\"\n _, _, tb = sys.exc_info()\n while tb.tb_next:\n tb = tb.tb_next\n f = tb.tb_frame\n lineno = tb.tb_lineno\n co = f.f_code\n filefullpath = co.co_filename\n filename = os.path.basename(filefullpath)\n name = co.co_name\n linecache.checkcache(filefullpath)\n line = linecache.getline(filefullpath, lineno, f.f_globals)\n if line:\n line = line.strip()\n else:\n line = None\n return filefullpath, filename, lineno, name, line"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\npreparing a record for queuing.", "response": "def prepare(self, record):\n # Function taken from Python 3.6 QueueHandler\n \"\"\"\n Prepares a record for queuing. The object returned by this method is\n enqueued.\n The base implementation formats the record to merge the message\n and arguments, and removes unpickleable items from the record\n in-place.\n You might want to override this method if you want to convert\n the record to a dict or JSON string, or send a modified copy\n of the record while leaving the original intact.\n \"\"\"\n # The format operation gets traceback text into record.exc_text\n # (if there's exception data), and also puts the message into\n # record.message. We can then use this to replace the original\n # msg + args, as these might be unpickleable. 
We also zap the\n # exc_info attribute, as it's no longer needed and, if not None,\n # will typically not be pickleable.\n self.format(record)\n record.msg = record.message\n record.args = None\n record.exc_info = None\n return record"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsending a LogRecord to the callback function after preparing it for serialization.", "response": "def emit(self, record):\n \"\"\"Send a LogRecord to the callback function, after preparing it\n for serialization.\"\"\"\n try:\n self._callback(self.prepare(record))\n except Exception:\n self.handleError(record)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nupdate the status of the service.", "response": "def update_status(self, status_code=None):\n \"\"\"Update the service status kept inside the frontend (_service_status).\n The status is broadcast over the network immediately. If the status\n changes to IDLE then this message is delayed. 
The IDLE status is only\n broadcast if it is held for over 0.5 seconds.\n When the status does not change it is still broadcast every\n _status_interval seconds.\n :param status_code: Either an integer describing the service status\n (see workflows.services.common_service), or None\n if the status is unchanged.\n \"\"\"\n if status_code is not None:\n self._service_status = status_code\n self._utilization.update_status(status_code)\n\n # Check whether IDLE status should be delayed\n if self._service_status == CommonService.SERVICE_STATUS_IDLE:\n if self._status_idle_since is None:\n self._status_idle_since = time.time()\n return\n elif self._status_idle_since + 0.5 > time.time():\n return\n else:\n self._status_idle_since = None\n\n new_status = self._service_status != self._service_status_announced\n if (\n (\n new_status\n or self._status_last_broadcast + self._status_interval <= time.time()\n )\n and self._transport\n and self._transport.is_connected()\n ):\n self._service_status_announced = self._service_status\n self._transport.broadcast_status(self.get_status())\n self._status_last_broadcast = time.time()"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef run(self):\n self.log.debug(\"Entered main loop\")\n while not self.shutdown:\n # If no service is running slow down the main loop\n if not self._pipe_service:\n time.sleep(0.3)\n self.update_status()\n # While a service is running, check for incoming messages from that service\n if self._pipe_service and self._pipe_service.poll(1):\n try:\n message = self._pipe_service.recv()\n if isinstance(message, dict) and \"band\" in message:\n # only dictionaries with 'band' entry are valid messages\n try:\n handler = getattr(self, \"parse_band_\" + message[\"band\"])\n except AttributeError:\n handler = None\n self.log.warning(\"Unknown band %s\", str(message[\"band\"]))\n if handler:\n # try:\n handler(message)\n # except Exception:\n # print('Uh oh. 
What to do.')\n else:\n self.log.warning(\"Invalid message received %s\", str(message))\n except EOFError:\n # Service has gone away\n error_message = False\n if self._service_status == CommonService.SERVICE_STATUS_END:\n self.log.info(\"Service terminated\")\n elif self._service_status == CommonService.SERVICE_STATUS_ERROR:\n error_message = \"Service terminated with error code\"\n elif self._service_status in (\n CommonService.SERVICE_STATUS_NONE,\n CommonService.SERVICE_STATUS_NEW,\n CommonService.SERVICE_STATUS_STARTING,\n ):\n error_message = (\n \"Service may have died unexpectedly in \"\n + \"initialization (last known status: %s)\"\n % CommonService.human_readable_state.get(\n self._service_status, self._service_status\n )\n )\n else:\n error_message = (\n \"Service may have died unexpectedly\"\n \" (last known status: %s)\"\n % CommonService.human_readable_state.get(\n self._service_status, self._service_status\n )\n )\n if error_message:\n self.log.error(error_message)\n self._terminate_service()\n if self.restart_service:\n self.exponential_backoff()\n else:\n self.shutdown = True\n if error_message:\n raise workflows.Error(error_message)\n\n with self.__lock:\n if (\n self._service is None\n and self.restart_service\n and self._service_factory\n ):\n self.update_status(status_code=CommonService.SERVICE_STATUS_NEW)\n self.switch_service()\n\n # Check that the transport is alive\n if not self._transport.is_connected():\n self._terminate_service()\n raise workflows.Error(\"Lost transport layer connection\")\n self.log.debug(\"Left main loop\")\n self.update_status(status_code=CommonService.SERVICE_STATUS_TEARDOWN)\n self._terminate_service()\n self.log.debug(\"Terminating.\")", "response": "The main loop of the frontend. 
Here incoming messages from the service are processed and forwarded to the corresponding callback methods."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef send_command(self, command):\n if self._pipe_commands:\n self._pipe_commands.send(command)\n else:\n if self.shutdown:\n # Stop delivering messages in shutdown.\n self.log.info(\n \"During shutdown no command queue pipe found for command\\n%s\",\n str(command),\n )\n else:\n self.log.error(\n \"No command queue pipe found for command\\n%s\", str(command)\n )", "response": "Send a command to the service via the command queue."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nparse a transport command message from the transport layer", "response": "def process_transport_command(self, header, message):\n \"\"\"Parse a command coming in through the transport command subscription\"\"\"\n if not isinstance(message, dict):\n return\n\n relevant = False\n if \"host\" in message: # Filter by host\n if message[\"host\"] != self.__hostid:\n return\n relevant = True\n if \"service\" in message: # Filter by service\n if message[\"service\"] != self._service_class_name:\n return\n relevant = True\n if not relevant: # Ignore message unless at least one filter matches\n return\n\n if message.get(\"command\"):\n self.log.info(\n \"Received command '%s' via transport layer\", message[\"command\"]\n )\n if message[\"command\"] == \"shutdown\":\n self.shutdown = True\n else:\n self.log.warning(\"Received invalid transport command message\")"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef parse_band_log(self, message):\n if \"payload\" in message and hasattr(message[\"payload\"], \"name\"):\n record = message[\"payload\"]\n for k in dir(record):\n if k.startswith(\"workflows_exc_\"):\n setattr(record, k[14:], getattr(record, k))\n delattr(record, k)\n for k, v in 
self.get_status().items():\n setattr(record, \"workflows_\" + k, v)\n logging.getLogger(record.name).handle(record)\n else:\n self.log.warning(\n \"Received broken record on log band\\n\" + \"Message: %s\\nRecord: %s\",\n str(message),\n str(\n hasattr(message.get(\"payload\"), \"__dict__\")\n and message[\"payload\"].__dict__\n ),\n )", "response": "Process incoming log messages from the service."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nprocesses a termination message.", "response": "def parse_band_request_termination(self, message):\n \"\"\"Service declares it should be terminated.\"\"\"\n self.log.debug(\"Service requests termination\")\n self._terminate_service()\n if not self.restart_service:\n self.shutdown = True"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef parse_band_set_name(self, message):\n if message.get(\"name\"):\n self._service_name = message[\"name\"]\n else:\n self.log.warning(\n \"Received broken record on set_name band\\nMessage: %s\", str(message)\n )", "response": "Process incoming message indicating service name change."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nprocesses incoming status updates from the service.", "response": "def parse_band_status_update(self, message):\n \"\"\"Process incoming status updates from the service.\"\"\"\n self.log.debug(\"Status update: \" + str(message))\n self.update_status(status_code=message[\"statuscode\"])"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_status(self):\n return {\n \"host\": self.__hostid,\n \"status\": self._service_status_announced,\n \"statustext\": CommonService.human_readable_state.get(\n self._service_status_announced\n ),\n \"service\": self._service_name,\n \"serviceclass\": self._service_class_name,\n \"utilization\": self._utilization.report(),\n 
\"workflows\": workflows.version(),\n }", "response": "Returns a dictionary containing all relevant status information to be\n broadcast across the network across the network."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nstart a new service in a subprocess.", "response": "def switch_service(self, new_service=None):\n \"\"\"Start a new service in a subprocess.\n :param new_service: Either a service name or a service class. If not set,\n start up a new instance of the previous class\n :return: True on success, False on failure.\n \"\"\"\n if new_service:\n self._service_factory = new_service\n with self.__lock:\n # Terminate existing service if necessary\n if self._service is not None:\n self._terminate_service()\n\n # Find service class if necessary\n if isinstance(self._service_factory, basestring):\n self._service_factory = workflows.services.lookup(self._service_factory)\n if not self._service_factory:\n return False\n\n # Set up new service object\n service_instance = self._service_factory(\n environment=self._service_environment\n )\n\n # Set up pipes and connect service object\n svc_commands, self._pipe_commands = multiprocessing.Pipe(False)\n self._pipe_service, svc_tofrontend = multiprocessing.Pipe(False)\n service_instance.connect(commands=svc_commands, frontend=svc_tofrontend)\n\n # Set up transport layer for new service\n service_instance.transport = self._transport_factory()\n\n # Start new service in a separate process\n self._service = multiprocessing.Process(\n target=service_instance.start,\n args=(),\n kwargs={\"verbose_log\": self._verbose_service},\n )\n self._service_name = service_instance.get_name()\n self._service_class_name = service_instance.__class__.__name__\n self._service.daemon = True\n self._service.name = \"workflows-service\"\n self._service.start()\n self._service_starttime = time.time()\n\n # Starting the process copies all file descriptors.\n # At this point (and no sooner!) 
the passed pipe objects must be closed\n # in this process here.\n svc_commands.close()\n svc_tofrontend.close()\n self.log.info(\"Started service: %s\", self._service_name)\n return True"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _terminate_service(self):\n with self.__lock:\n if self._service:\n self._service.terminate()\n if self._pipe_commands:\n self._pipe_commands.close()\n if self._pipe_service:\n self._pipe_service.close()\n self._pipe_commands = None\n self._pipe_service = None\n self._service_class_name = None\n self._service_name = None\n if self._service_status != CommonService.SERVICE_STATUS_TEARDOWN:\n self.update_status(status_code=CommonService.SERVICE_STATUS_END)\n if self._service:\n self._service.join() # must wait for process to be actually destroyed\n self._service = None", "response": "Force termination of running service."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef symbolic(self, A):\n\n if self.sparselib == 'umfpack':\n return umfpack.symbolic(A)\n\n elif self.sparselib == 'klu':\n return klu.symbolic(A)", "response": "Returns the symbolic factorization of a sparse matrix A."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the numeric factorization of sparse matrix A using symbolic factorization F.", "response": "def numeric(self, A, F):\n \"\"\"\n Return the numeric factorization of sparse matrix ``A`` using symbolic factorization ``F``\n\n Parameters\n ----------\n A\n Sparse matrix\n F\n Symbolic factorization\n\n Returns\n -------\n N\n Numeric factorization of ``A``\n \"\"\"\n if self.sparselib == 'umfpack':\n return umfpack.numeric(A, F)\n\n elif self.sparselib == 'klu':\n return klu.numeric(A, F)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsolves linear system Ax = b using numeric factorization N and symbolic factorization F and 
store the solution in b.", "response": "def solve(self, A, F, N, b):\n \"\"\"\n Solve linear system ``Ax = b`` using numeric factorization ``N`` and symbolic factorization ``F``.\n Store the solution in ``b``.\n\n Parameters\n ----------\n A\n Sparse matrix\n F\n Symbolic factorization\n N\n Numeric factorization\n b\n RHS of the equation\n\n Returns\n -------\n None\n \"\"\"\n if self.sparselib == 'umfpack':\n umfpack.solve(A, N, b)\n\n elif self.sparselib == 'klu':\n klu.solve(A, F, N, b)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef linsolve(self, A, b):\n if self.sparselib == 'umfpack':\n return umfpack.linsolve(A, b)\n\n elif self.sparselib == 'klu':\n return klu.linsolve(A, b)", "response": "Solve linear equation set A and store the solutions in b."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncustomizes varname for bus injections", "response": "def _varname_inj(self):\n \"\"\"Customize varname for bus injections\"\"\"\n # Bus Pi\n if not self.n:\n return\n m = self.system.dae.m\n xy_idx = range(m, self.n + m)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='P',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='P',\n element_name=self.name)\n\n # Bus Qi\n xy_idx = range(m + self.n, m + 2 * self.n)\n self.system.varname.append(\n listname='unamey',\n xy_idx=xy_idx,\n var_name='Q',\n element_name=self.name)\n self.system.varname.append(\n listname='fnamey',\n xy_idx=xy_idx,\n var_name='Q',\n element_name=self.name)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef init0(self, dae):\n if not self.system.pflow.config.flatstart:\n dae.y[self.a] = self.angle + 1e-10 * uniform(self.n)\n dae.y[self.v] = self.voltage\n else:\n dae.y[self.a] = matrix(0.0,\n (self.n, 1), 'd') + 1e-10 * 
uniform(self.n)\n dae.y[self.v] = matrix(1.0, (self.n, 1), 'd')", "response": "Set bus voltage and voltage initial values"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef gisland(self, dae):\n if (not self.islanded_buses) and (not self.island_sets):\n return\n\n a, v = list(), list()\n\n # for islanded areas without a slack bus\n # TODO: fix for islanded sets without sw\n # for island in self.island_sets:\n # nosw = 1\n # for item in self.system.SW.bus:\n # if self.uid[item] in island:\n # nosw = 0\n # break\n # if nosw:\n # self.islanded_buses += island\n # self.island_sets.remove(island)\n\n a = self.islanded_buses\n v = [self.n + item for item in a]\n dae.g[a] = 0\n dae.g[v] = 0", "response": "Reset g x for islanded buses and areas"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nresets gy island for islanded buses and areas", "response": "def gyisland(self, dae):\n \"\"\"Reset gy(x) for islanded buses and areas\"\"\"\n if self.system.Bus.islanded_buses:\n a = self.system.Bus.islanded_buses\n v = [self.system.Bus.n + item for item in a]\n dae.set_jac(Gy, 1e-6, a, a)\n dae.set_jac(Gy, 1e-6, v, v)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nresetting g x for islanded buses and areas", "response": "def gisland(self, dae):\n \"\"\"Reset g(x) for islanded buses and areas\"\"\"\n if not (self.islanded_buses and self.island_sets):\n return\n\n a, v = list(), list()\n\n # for islanded areas without a slack bus\n for island in self.island_sets:\n nosw = 1\n for item in self.system.SW.bus:\n if self.uid[item] in island:\n nosw = 0\n break\n if nosw:\n self.islanded_buses += island\n self.island_sets.remove(island)\n\n a = self.islanded_buses\n v = [self.n + item for item in a]\n dae.g[a] = 0\n dae.g[v] = 0"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_known_transports():\n if 
not hasattr(get_known_transports, \"cache\"):\n setattr(\n get_known_transports,\n \"cache\",\n {\n e.name: e.load()\n for e in pkg_resources.iter_entry_points(\"workflows.transport\")\n },\n )\n return get_known_transports.cache.copy()", "response": "Return a dictionary of all known transport mechanisms."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef windspeed(self, t):\n ws = [0] * self.n\n\n for i in range(self.n):\n q = ceil(t / self.dt[i])\n q_prev = 0 if q == 0 else q - 1\n\n r = t % self.dt[i]\n r = 0 if abs(r) < 1e-6 else r\n\n if r == 0:\n ws[i] = self.speed[i][q]\n else:\n t1 = self.time[i][q_prev]\n s1 = self.speed[i][q_prev]\n s2 = self.speed[i][q]\n ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i]\n\n return matrix(ws)", "response": "Return the wind speed list at time t"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting value for self. vf0 and dae. y [ self. vf ]", "response": "def set_vf0(self, vf):\n \"\"\"set value for self.vf0 and dae.y[self.vf]\"\"\"\n self.vf0 = vf\n self.system.dae.y[self.vf] = matrix(vf)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef setup(self):\n\n self.devman.sort_device()\n self.call.setup()\n self.model_setup()\n self.xy_addr0()\n self.dae.setup()\n self.to_sysbase()\n\n return self", "response": "Set up the power system object by executing the following workflow"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nconvert model parameters to system base.", "response": "def to_sysbase(self):\n \"\"\"\n Convert model parameters to system base. 
This function calls the\n ``data_to_sys_base`` function of the loaded models.\n\n Returns\n -------\n None\n \"\"\"\n if self.config.base:\n for item in self.devman.devices:\n self.__dict__[item].data_to_sys_base()"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef to_elembase(self):\n if self.config.base:\n for item in self.devman.devices:\n self.__dict__[item].data_to_elem_base()", "response": "Convert parameters back to element base."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef model_import(self):\n # non-JIT models\n for file, pair in non_jits.items():\n for cls, name in pair.items():\n themodel = importlib.import_module('andes.models.' + file)\n theclass = getattr(themodel, cls)\n self.__dict__[name] = theclass(self, name)\n\n group = self.__dict__[name]._group\n self.group_add(group)\n self.__dict__[group].register_model(name)\n\n self.devman.register_device(name)\n\n # import JIT models\n for file, pair in jits.items():\n for cls, name in pair.items():\n self.__dict__[name] = JIT(self, file, cls, name)", "response": "Imports and instantiate the non - JIT models and the JIT models."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef routine_import(self):\n for r in routines.__all__:\n file = importlib.import_module('.' + r.lower(), 'andes.routines')\n self.__dict__[r.lower()] = getattr(file, r)(self)", "response": "Dynamically import routines as defined in modules and store the class name of each class in the system."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncalling the setup function of the loaded models.", "response": "def model_setup(self):\n \"\"\"\n Call the ``setup`` function of the loaded models. 
This function is\n to be called after parsing all the data files during the system set up.\n\n Returns\n -------\n None\n \"\"\"\n for device in self.devman.devices:\n if self.__dict__[device].n:\n try:\n self.__dict__[device].setup()\n except Exception as e:\n raise e"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef xy_addr0(self):\n for device, pflow in zip(self.devman.devices, self.call.pflow):\n if pflow:\n self.__dict__[device]._addr()\n self.__dict__[device]._intf_network()\n self.__dict__[device]._intf_ctrl()\n\n self.varname.resize()\n\n for device, pflow in zip(self.devman.devices, self.call.pflow):\n if pflow:\n self.__dict__[device]._varname()", "response": "Assign indicies and variable names for variables used in power flow to the first loaded model."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef rmgen(self, idx):\n stagens = []\n for device, stagen in zip(self.devman.devices, self.call.stagen):\n if stagen:\n stagens.append(device)\n for gen in idx:\n for stagen in stagens:\n if gen in self.__dict__[stagen].uid.keys():\n self.__dict__[stagen].disable_gen(gen)", "response": "Removes the static generators if their dynamic models exist."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nchecking for event occurrance for Event group models at sim_time.", "response": "def check_event(self, sim_time):\n \"\"\"\n Check for event occurrance for``Event`` group models at ``sim_time``\n\n Parameters\n ----------\n sim_time : float\n The current simulation time\n\n Returns\n -------\n list\n A list of model names who report (an) event(s) at ``sim_time``\n \"\"\"\n ret = []\n for model in self.__dict__['Event'].all_models:\n if self.__dict__[model].is_time(sim_time):\n ret.append(model)\n\n if self.Breaker.is_time(sim_time):\n ret.append('Breaker')\n\n return ret"} {"SOURCE": "codesearchnet", 
"instruction": "Write a Python 3 function for\nloading an Andes config file into the system and the routines.", "response": "def load_config(self, conf_path):\n \"\"\"\n Load config from an ``andes.conf`` file.\n\n This function creates a ``configparser.ConfigParser`` object to read\n the specified conf file and calls the ``load_config`` function of the\n config instances of the system and the routines.\n\n Parameters\n ----------\n conf_path : None or str\n Path to the Andes config file. If ``None``, the function body\n will not run.\n\n Returns\n -------\n None\n \"\"\"\n if conf_path is None:\n return\n\n conf = configparser.ConfigParser()\n conf.read(conf_path)\n\n self.config.load_config(conf)\n for r in routines.__all__:\n self.__dict__[r.lower()].config.load_config(conf)\n\n logger.debug('Loaded config file from {}.'.format(conf_path))"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ndumps system and routine configurations to an rc - formatted file.", "response": "def dump_config(self, file_path):\n \"\"\"\n Dump system and routine configurations to an rc-formatted file.\n\n Parameters\n ----------\n file_path : str\n path to the configuration file. The user will be prompted if the\n file already exists.\n\n Returns\n -------\n None\n \"\"\"\n if os.path.isfile(file_path):\n logger.debug('File {} alreay exist. Overwrite? [y/N]'.format(file_path))\n choice = input('File {} alreay exist. Overwrite? 
[y/N]'.format(file_path)).lower()\n if len(choice) == 0 or choice[0] != 'y':\n logger.info('File not overwritten.')\n return\n\n conf = self.config.dump_conf()\n for r in routines.__all__:\n conf = self.__dict__[r.lower()].config.dump_conf(conf)\n\n with open(file_path, 'w') as f:\n conf.write(f)\n\n logger.info('Config written to {}'.format(file_path))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef check_islands(self, show_info=False):\n if not hasattr(self, 'Line'):\n logger.error(' device not found.')\n return\n self.Line.connectivity(self.Bus)\n\n if show_info is True:\n\n if len(self.Bus.islanded_buses) == 0 and len(\n self.Bus.island_sets) == 0:\n logger.debug('System is interconnected.')\n else:\n logger.info(\n 'System contains {:d} islands and {:d} islanded buses.'.\n format(\n len(self.Bus.island_sets),\n len(self.Bus.islanded_buses)))\n\n nosw_island = [] # no slack bus island\n msw_island = [] # multiple slack bus island\n for idx, island in enumerate(self.Bus.island_sets):\n nosw = 1\n for item in self.SW.bus:\n if self.Bus.uid[item] in island:\n nosw -= 1\n if nosw == 1:\n nosw_island.append(idx)\n elif nosw < 0:\n msw_island.append(idx)\n\n if nosw_island:\n logger.warning(\n 'Slack bus is not defined for {:g} island(s).'.format(\n len(nosw_island)))\n if msw_island:\n logger.warning(\n 'Multiple slack buses are defined for {:g} island(s).'.\n format(len(nosw_island)))\n\n if (not nosw_island) and (not msw_island):\n logger.debug(\n 'Each island has a slack bus correctly defined.')", "response": "Checks the connectivity of the ac system and returns a dictionary of the islanded and slack buses."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_busdata(self, sort_names=False):\n if self.pflow.solved is False:\n logger.error('Power flow not solved when getting bus data.')\n return tuple([False] * 8)\n idx = self.Bus.idx\n 
names = self.Bus.name\n Vm = [self.dae.y[x] for x in self.Bus.v]\n if self.pflow.config.usedegree:\n Va = [self.dae.y[x] * rad2deg for x in self.Bus.a]\n else:\n Va = [self.dae.y[x] for x in self.Bus.a]\n\n Pg = [self.Bus.Pg[x] for x in range(self.Bus.n)]\n Qg = [self.Bus.Qg[x] for x in range(self.Bus.n)]\n Pl = [self.Bus.Pl[x] for x in range(self.Bus.n)]\n Ql = [self.Bus.Ql[x] for x in range(self.Bus.n)]\n\n if sort_names:\n ret = (list(x) for x in zip(*sorted(\n zip(idx, names, Vm, Va, Pg, Qg, Pl, Ql), key=itemgetter(0))))\n else:\n ret = idx, names, Vm, Va, Pg, Qg, Pl, Ql\n\n return ret", "response": "get ac bus data from solved power flow"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nget dc node data from solved power flow", "response": "def get_nodedata(self, sort_names=False):\n \"\"\"\n get dc node data from solved power flow\n \"\"\"\n if not self.Node.n:\n return\n if not self.pflow.solved:\n logger.error('Power flow not solved when getting bus data.')\n return tuple([False] * 7)\n idx = self.Node.idx\n names = self.Node.name\n V = [self.dae.y[x] for x in self.Node.v]\n\n if sort_names:\n ret = (list(x)\n for x in zip(*sorted(zip(idx, names, V), key=itemgetter(0))))\n else:\n ret = idx, names, V\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nget line data from solved power flow", "response": "def get_linedata(self, sort_names=False):\n \"\"\"\n get line data from solved power flow\n \"\"\"\n if not self.pflow.solved:\n logger.error('Power flow not solved when getting line data.')\n return tuple([False] * 7)\n idx = self.Line.idx\n fr = self.Line.bus1\n to = self.Line.bus2\n\n Sloss = self.Line.S1 + self.Line.S2\n\n Pfr = list(self.Line.S1.real())\n Qfr = list(self.Line.S1.imag())\n Pto = list(self.Line.S2.real())\n Qto = list(self.Line.S2.imag())\n\n Ploss = list(Sloss.real())\n Qloss = list(Sloss.imag())\n\n if sort_names:\n ret = (list(x) for x in zip(*sorted(\n 
zip(idx, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss),\n key=itemgetter(0))))\n else:\n ret = idx, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nregisters model to this group.", "response": "def register_model(self, model):\n \"\"\"\n Register ``model`` to this group\n\n :param model: model name\n :return: None\n \"\"\"\n\n assert isinstance(model, str)\n if model not in self.all_models:\n self.all_models.append(model)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef register_element(self, model, idx):\n\n if idx is None:\n idx = model + '_' + str(len(self._idx_model))\n\n self._idx_model[idx] = model\n self._idx.append(idx)\n\n return idx", "response": "Register element with index idx to model"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the field of elements idx in the group", "response": "def get_field(self, field, idx):\n \"\"\"\n Return the field ``field`` of elements ``idx`` in the group\n\n :param field: field name\n :param idx: element idx\n :return: values of the requested field\n \"\"\"\n ret = []\n scalar = False\n\n # TODO: ensure idx is unique in this Group\n\n if isinstance(idx, (int, float, str)):\n scalar = True\n idx = [idx]\n\n models = [self._idx_model[i] for i in idx]\n\n for i, m in zip(idx, models):\n ret.append(self.system.__dict__[m].get_field(field, idx=i))\n\n if scalar is True:\n return ret[0]\n else:\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nset the field of elements idx to value.", "response": "def set_field(self, field, idx, value):\n \"\"\"\n Set the field ``field`` of elements ``idx`` to ``value``.\n\n This function does not if the field is valid for all models.\n\n :param field: field name\n :param idx: element idx\n :param value: value of fields to set\n :return: None\n \"\"\"\n\n 
if isinstance(idx, (int, float, str)):\n idx = [idx]\n if isinstance(value, (int, float)):\n value = [value]\n\n models = [self._idx_model[i] for i in idx]\n\n for i, m, v in zip(idx, models, value):\n assert hasattr(self.system.__dict__[m], field)\n\n uid = self.system.__dict__[m].get_uid(idx)\n self.system.__dict__[m].__dict__[field][uid] = v"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef run(\n self,\n cmdline_args=None,\n program_name=\"start_service\",\n version=workflows.version(),\n **kwargs\n ):\n \"\"\"Example command line interface to start services.\n :param cmdline_args: List of command line arguments to pass to parser\n :param program_name: Name of the command line tool to display in help\n :param version: Version number to print when run with '--version'\n \"\"\"\n\n # Enumerate all known services\n known_services = workflows.services.get_known_services()\n\n # Set up parser\n parser = OptionParser(\n usage=program_name + \" [options]\" if program_name else None, version=version\n )\n parser.add_option(\"-?\", action=\"help\", help=SUPPRESS_HELP)\n parser.add_option(\n \"-s\",\n \"--service\",\n dest=\"service\",\n metavar=\"SVC\",\n default=None,\n help=\"Name of the service to start. Known services: \"\n + \", \".join(known_services),\n )\n parser.add_option(\n \"-t\",\n \"--transport\",\n dest=\"transport\",\n metavar=\"TRN\",\n default=\"StompTransport\",\n help=\"Transport mechanism. 
Known mechanisms: \"\n + \", \".join(workflows.transport.get_known_transports())\n + \" (default: %default)\",\n )\n workflows.transport.add_command_line_options(parser)\n\n # Call on_parser_preparation hook\n parser = self.on_parser_preparation(parser) or parser\n\n # Parse command line options\n (options, args) = parser.parse_args(cmdline_args)\n\n # Call on_parsing hook\n (options, args) = self.on_parsing(options, args) or (options, args)\n\n # Create Transport factory\n transport_factory = workflows.transport.lookup(options.transport)\n\n # Call on_transport_factory_preparation hook\n transport_factory = (\n self.on_transport_factory_preparation(transport_factory)\n or transport_factory\n )\n\n # Set up on_transport_preparation hook to affect newly created transport objects\n true_transport_factory_call = transport_factory.__call__\n\n def on_transport_preparation_hook():\n transport_object = true_transport_factory_call()\n return self.on_transport_preparation(transport_object) or transport_object\n\n transport_factory.__call__ = on_transport_preparation_hook\n\n # When service name is specified, check if service exists or can be derived\n if options.service and options.service not in known_services:\n matching = [s for s in known_services if s.startswith(options.service)]\n if not matching:\n matching = [\n s\n for s in known_services\n if s.lower().startswith(options.service.lower())\n ]\n if matching and len(matching) == 1:\n options.service = matching[0]\n\n kwargs.update({\"service\": options.service, \"transport\": transport_factory})\n\n # Call before_frontend_construction hook\n kwargs = self.before_frontend_construction(kwargs) or kwargs\n\n # Create Frontend object\n frontend = workflows.frontend.Frontend(**kwargs)\n\n # Call on_frontend_preparation hook\n frontend = self.on_frontend_preparation(frontend) or frontend\n\n # Start Frontend\n try:\n frontend.run()\n except KeyboardInterrupt:\n print(\"\\nShutdown via Ctrl+C\")", "response": "This 
function is called by the command line interface to start services."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_sort_field(request):\n sort_direction = request.GET.get(\"dir\")\n field_name = (request.GET.get(\"sort\") or \"\") if sort_direction else \"\"\n sort_sign = \"-\" if sort_direction == \"desc\" else \"\"\n result_field = \"{sign}{field}\".format(sign=sort_sign, field=field_name)\n return result_field", "response": "Retrieve field used for sorting a queryset\n "} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nparses an anchor field.", "response": "def anchor(parser, token):\n \"\"\"\n Parses a tag that's supposed to be in this format '{% anchor field title %}'\n Title may be a \"string\", _(\"trans string\"), or variable\n \"\"\"\n bits = [b for b in token.split_contents()]\n if len(bits) < 2:\n raise template.TemplateSyntaxError(\"anchor tag takes at least 1 argument.\")\n\n title_is_var = False\n try:\n title = bits[2]\n if title[0] in ('\"', \"'\"):\n if title[0] == title[-1]:\n title = title[1:-1]\n else:\n raise template.TemplateSyntaxError(\n 'anchor tag title must be a \"string\", _(\"trans string\"), or variable'\n )\n elif title.startswith('_(\"') or title.startswith(\"_('\"):\n title = _(title[3:-2])\n else:\n title_is_var = True\n except IndexError:\n title = bits[1].capitalize()\n\n return SortAnchorNode(bits[1].strip(), title.strip(), title_is_var)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmake an API call to the wiki.", "response": "def call(self, params):\n \"\"\"\n Make an API call to the wiki. *params* is a dictionary of query string\n arguments. 
For example, to get basic information about the wiki, run:\n\n >>> wiki.call({'action': 'query', 'meta': 'siteinfo'})\n\n which would make a call to\n ``http://domain/w/api.php?action=query&meta=siteinfo&format=json``\n (except the query string would be sent in POST).\n\n :param params: dictionary of query string parameters\n :returns: dictionary containing API response\n \"\"\"\n return json.loads(self._fetch_http(self._api_url, params))"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef normalize_api_url(self):\n def tester(self, api_url):\n \"\"\"\n Attempts to fetch general information about the MediaWiki instance\n in order to test whether *api_url* will return JSON.\n \"\"\"\n data = self._fetch_http(api_url, {'action': 'query',\n 'meta': 'siteinfo'})\n try:\n data_json = json.loads(data)\n return (data, data_json)\n except ValueError:\n return (data, None)\n\n data, data_json = tester(self, self._api_url)\n if data_json:\n return self._api_url\n else:\n # if there's an index.php in the URL, we might find the API\n if 'index.php' in self._api_url:\n test_api_url = self._api_url.split('index.php')[0] + 'api.php'\n test_data, test_data_json = tester(self, test_api_url)\n if test_data_json:\n self._api_url = test_api_url\n return self._api_url\n return None", "response": "Normalizes the API URL to be a valid one."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef login(self, user, passwd):\n def do_login(self, user, passwd, token=None):\n \"\"\"\n Login function that handles CSRF protection (see `MediaWiki bug\n 23076`_). Returns ``True`` on successful login.\n\n .. 
_`MediaWiki bug 23076`:\n https://bugzilla.wikimedia.org/show_bug.cgi?id=23076\n \"\"\"\n data = {'action': 'login',\n 'lgname': user,\n 'lgpassword': passwd}\n if token:\n data['lgtoken'] = token\n result = self.call(data)\n if result['login']['result'] == 'Success':\n self._high_limits = None\n return True\n elif result['login']['result'] == 'NeedToken' and not token:\n return do_login(self, user, passwd, result['login']['token'])\n else:\n return False\n\n return do_login(self, user, passwd)", "response": "Logs into the wiki with username user and password passwd. Returns True on successful login otherwise False."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a value that can be used to set the limits for a specific user - defined entry.", "response": "def limits(self, low, high):\n \"\"\"\n Convenience function for determining appropriate limits in the API. If\n the (usually logged-in) client has the ``apihighlimits`` right, it will\n return *high*; otherwise it will return *low*.\n\n It's generally a good idea to use the highest limit possible; this\n reduces the amount of HTTP requests and therefore overhead. Read the\n API documentation for details on the limits for the function you are\n using.\n\n :param low: value to return if client does not have ``apihighlimits``\n :param high: value to return if client has ``apihighlimits``\n :returns: *low* or *high*\n \"\"\"\n if self._high_limits is None:\n result = self.call({'action': 'query',\n 'meta': 'userinfo',\n 'uiprop': 'rights'})\n self._high_limits = 'apihighlimits' in \\\n result['query']['userinfo']['rights']\n if self._high_limits:\n return high\n else:\n return low"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nfetches a list of namespaces for this wiki and returns them as a dictionary of namespace IDs corresponding to namespace names. 
If psuedo is False the dictionary will also list psuedo - namespaces corresponding to namespace names.", "response": "def namespaces(self, psuedo=True):\n \"\"\"\n Fetches a list of namespaces for this wiki and returns them as a\n dictionary of namespace IDs corresponding to namespace names. If\n *psuedo* is ``True``, the dictionary will also list psuedo-namespaces,\n which are the \"Special:\" and \"Media:\" namespaces (special because they\n have no content associated with them and their IDs are negative).\n\n :param psuedo: boolean to determine inclusion of psuedo-namespaces\n :returns: dictionary of namespace IDs and names\n \"\"\"\n if self._namespaces is None:\n result = self.call({'action': 'query',\n 'meta': 'siteinfo',\n 'siprop': 'namespaces'})\n self._namespaces = {}\n self._psuedo_namespaces = {}\n for nsid in result['query']['namespaces']:\n if int(nsid) >= 0:\n self._namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n else:\n self._psuedo_namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n if psuedo:\n retval = {}\n retval.update(self._namespaces)\n retval.update(self._psuedo_namespaces)\n return retval\n else:\n return self._namespaces"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef readLocationElement(self, locationElement):\n if self._strictAxisNames and not self.documentObject.axes:\n raise DesignSpaceDocumentError(\"No axes defined\")\n loc = {}\n for dimensionElement in locationElement.findall(\".dimension\"):\n dimName = dimensionElement.attrib.get(\"name\")\n if self._strictAxisNames and dimName not in self.axisDefaults:\n # In case the document contains no axis definitions,\n self.log.warning(\"Location with undefined axis: \\\"%s\\\".\", dimName)\n continue\n xValue = yValue = None\n try:\n xValue = dimensionElement.attrib.get('xvalue')\n xValue = float(xValue)\n except ValueError:\n self.log.warning(\"KeyError in readLocation xValue 
%3.3f\", xValue)\n try:\n yValue = dimensionElement.attrib.get('yvalue')\n if yValue is not None:\n yValue = float(yValue)\n except ValueError:\n pass\n if yValue is not None:\n loc[dimName] = (xValue, yValue)\n else:\n loc[dimName] = xValue\n return loc", "response": "Read a location element and return it as a dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef build(\n documentPath,\n outputUFOFormatVersion=3,\n roundGeometry=True,\n verbose=True, # not supported\n logPath=None, # not supported\n progressFunc=None, # not supported\n processRules=True,\n logger=None,\n useVarlib=False,\n ):\n \"\"\"\n Simple builder for UFO designspaces.\n \"\"\"\n import os, glob\n if os.path.isdir(documentPath):\n # process all *.designspace documents in this folder\n todo = glob.glob(os.path.join(documentPath, \"*.designspace\"))\n else:\n # process the\n todo = [documentPath]\n results = []\n for path in todo:\n document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)\n document.useVarlib = useVarlib\n document.roundGeometry = roundGeometry\n document.read(path)\n try:\n r = document.generateUFO(processRules=processRules)\n results.append(r)\n except:\n if logger:\n logger.exception(\"ufoProcessor error\")\n #results += document.generateUFO(processRules=processRules)\n reader = None\n return results", "response": "Build a list of UFO designspaces from a UFO file."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns a info mutator for this font.", "response": "def getInfoMutator(self):\n \"\"\" Returns a info mutator \"\"\"\n if self._infoMutator:\n return self._infoMutator\n infoItems = []\n for sourceDescriptor in self.sources:\n if sourceDescriptor.layerName is not None:\n continue\n loc = Location(sourceDescriptor.location)\n sourceFont = self.fonts[sourceDescriptor.name]\n if sourceFont is None:\n continue\n if hasattr(sourceFont.info, 
\"toMathInfo\"):\n infoItems.append((loc, sourceFont.info.toMathInfo()))\n else:\n infoItems.append((loc, self.mathInfoClass(sourceFont.info)))\n bias, self._infoMutator = self.getVariationModel(infoItems, axes=self.serializedAxes, bias=self.newDefaultLocation())\n return self._infoMutator"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef getKerningMutator(self, pairs=None):\n if self._kerningMutator and pairs == self._kerningMutatorPairs:\n return self._kerningMutator\n kerningItems = []\n if pairs is None:\n for sourceDescriptor in self.sources:\n if sourceDescriptor.layerName is not None:\n continue\n if not sourceDescriptor.muteKerning:\n loc = Location(sourceDescriptor.location)\n sourceFont = self.fonts[sourceDescriptor.name]\n if sourceFont is None: continue\n # this makes assumptions about the groups of all sources being the same.\n kerningItems.append((loc, self.mathKerningClass(sourceFont.kerning, sourceFont.groups)))\n else:\n self._kerningMutatorPairs = pairs\n for sourceDescriptor in self.sources:\n # XXX check sourceDescriptor layerName, only foreground should contribute\n if sourceDescriptor.layerName is not None:\n continue\n if not os.path.exists(sourceDescriptor.path):\n continue\n if not sourceDescriptor.muteKerning:\n sourceFont = self.fonts[sourceDescriptor.name]\n if sourceFont is None:\n continue\n loc = Location(sourceDescriptor.location)\n # XXX can we get the kern value from the fontparts kerning object?\n kerningItem = self.mathKerningClass(sourceFont.kerning, sourceFont.groups)\n sparseKerning = {}\n for pair in pairs:\n v = kerningItem.get(pair)\n if v is not None:\n sparseKerning[pair] = v\n kerningItems.append((loc, self.mathKerningClass(sparseKerning)))\n bias, self._kerningMutator = self.getVariationModel(kerningItems, axes=self.serializedAxes, bias=self.newDefaultLocation())\n return self._kerningMutator", "response": "Return a kerning mutator collect the sources 
build mathGlyphs."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncollect all items for a given glyph.", "response": "def collectMastersForGlyph(self, glyphName, decomposeComponents=False):\n \"\"\" Return a glyph mutator.defaultLoc\n decomposeComponents = True causes the source glyphs to be decomposed first\n before building the mutator. That gives you instances that do not depend\n on a complete font. If you're calculating previews for instance.\n\n XXX check glyphs in layers\n \"\"\"\n items = []\n empties = []\n foundEmpty = False\n for sourceDescriptor in self.sources:\n if not os.path.exists(sourceDescriptor.path):\n #kthxbai\n p = \"\\tMissing UFO at %s\" % sourceDescriptor.path\n if p not in self.problems:\n self.problems.append(p)\n continue\n if glyphName in sourceDescriptor.mutedGlyphNames:\n continue\n thisIsDefault = self.default == sourceDescriptor\n ignoreMaster, filteredLocation = self.filterThisLocation(sourceDescriptor.location, self.mutedAxisNames)\n if ignoreMaster:\n continue\n f = self.fonts.get(sourceDescriptor.name)\n if f is None: continue\n loc = Location(sourceDescriptor.location)\n sourceLayer = f\n if not glyphName in f:\n # log this>\n continue\n layerName = getDefaultLayerName(f)\n sourceGlyphObject = None\n # handle source layers\n if sourceDescriptor.layerName is not None:\n # start looking for a layer\n # Do not bother for mutatorMath designspaces\n layerName = sourceDescriptor.layerName\n sourceLayer = getLayer(f, sourceDescriptor.layerName)\n if sourceLayer is None:\n continue\n if glyphName not in sourceLayer:\n # start looking for a glyph\n # this might be a support in a sparse layer\n # so we're skipping!\n continue\n # still have to check if the sourcelayer glyph is empty\n if not glyphName in sourceLayer:\n continue\n else:\n sourceGlyphObject = sourceLayer[glyphName]\n if checkGlyphIsEmpty(sourceGlyphObject, allowWhiteSpace=True):\n foundEmpty = True\n #sourceGlyphObject = None\n 
#continue\n if decomposeComponents:\n # what about decomposing glyphs in a partial font?\n temp = self.glyphClass()\n p = temp.getPointPen()\n dpp = DecomposePointPen(sourceLayer, p)\n sourceGlyphObject.drawPoints(dpp)\n temp.width = sourceGlyphObject.width\n temp.name = sourceGlyphObject.name\n processThis = temp\n else:\n processThis = sourceGlyphObject\n sourceInfo = dict(source=f.path, glyphName=glyphName,\n layerName=layerName,\n location=filteredLocation, # sourceDescriptor.location,\n sourceName=sourceDescriptor.name,\n )\n if hasattr(processThis, \"toMathGlyph\"):\n processThis = processThis.toMathGlyph()\n else:\n processThis = self.mathGlyphClass(processThis)\n items.append((loc, processThis, sourceInfo))\n empties.append((thisIsDefault, foundEmpty))\n # check the empties:\n # if the default glyph is empty, then all must be empty\n # if the default glyph is not empty then none can be empty\n checkedItems = []\n emptiesAllowed = False\n # first check if the default is empty.\n # remember that the sources can be in any order\n for i, p in enumerate(empties):\n isDefault, isEmpty = p\n if isDefault and isEmpty:\n emptiesAllowed = True\n # now we know what to look for\n if not emptiesAllowed:\n for i, p in enumerate(empties):\n isDefault, isEmpty = p\n if not isEmpty:\n checkedItems.append(items[i])\n else:\n for i, p in enumerate(empties):\n isDefault, isEmpty = p\n if isEmpty:\n checkedItems.append(items[i])\n return checkedItems"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates a new instance of the specified instance.", "response": "def makeInstance(self, instanceDescriptor,\n doRules=False,\n glyphNames=None,\n pairs=None,\n bend=False):\n \"\"\" Generate a font object for this instance \"\"\"\n font = self._instantiateFont(None)\n # make fonty things here\n loc = Location(instanceDescriptor.location)\n anisotropic = False\n locHorizontal = locVertical = loc\n if self.isAnisotropic(loc):\n 
anisotropic = True\n locHorizontal, locVertical = self.splitAnisotropic(loc)\n # groups\n renameMap = getattr(self.fonts[self.default.name], \"kerningGroupConversionRenameMaps\", None)\n font.kerningGroupConversionRenameMaps = renameMap if renameMap is not None else {'side1': {}, 'side2': {}}\n # make the kerning\n # this kerning is always horizontal. We can take the horizontal location\n # filter the available pairs?\n if instanceDescriptor.kerning:\n if pairs:\n try:\n kerningMutator = self.getKerningMutator(pairs=pairs)\n kerningObject = kerningMutator.makeInstance(locHorizontal, bend=bend)\n kerningObject.extractKerning(font)\n except:\n self.problems.append(\"Could not make kerning for %s. %s\" % (loc, traceback.format_exc()))\n else:\n kerningMutator = self.getKerningMutator()\n kerningObject = kerningMutator.makeInstance(locHorizontal, bend=bend)\n kerningObject.extractKerning(font)\n # make the info\n try:\n infoMutator = self.getInfoMutator()\n if not anisotropic:\n infoInstanceObject = infoMutator.makeInstance(loc, bend=bend)\n else:\n horizontalInfoInstanceObject = infoMutator.makeInstance(locHorizontal, bend=bend)\n verticalInfoInstanceObject = infoMutator.makeInstance(locVertical, bend=bend)\n # merge them again\n infoInstanceObject = (1,0)*horizontalInfoInstanceObject + (0,1)*verticalInfoInstanceObject\n if self.roundGeometry:\n try:\n infoInstanceObject = infoInstanceObject.round()\n except AttributeError:\n pass\n infoInstanceObject.extractInfo(font.info)\n font.info.familyName = instanceDescriptor.familyName\n font.info.styleName = instanceDescriptor.styleName\n font.info.postscriptFontName = instanceDescriptor.postScriptFontName # yikes, note the differences in capitalisation..\n font.info.styleMapFamilyName = instanceDescriptor.styleMapFamilyName\n font.info.styleMapStyleName = instanceDescriptor.styleMapStyleName\n # NEED SOME HELP WITH THIS\n # localised names need to go to the right openTypeNameRecords\n # records = []\n # nameID = 1\n # 
platformID =\n # for languageCode, name in instanceDescriptor.localisedStyleMapFamilyName.items():\n # # Name ID 1 (font family name) is found at the generic styleMapFamily attribute.\n # records.append((nameID, ))\n except:\n self.problems.append(\"Could not make fontinfo for %s. %s\" % (loc, traceback.format_exc()))\n for sourceDescriptor in self.sources:\n if sourceDescriptor.copyInfo:\n # this is the source\n if self.fonts[sourceDescriptor.name] is not None:\n self._copyFontInfo(self.fonts[sourceDescriptor.name].info, font.info)\n if sourceDescriptor.copyLib:\n # excplicitly copy the font.lib items\n if self.fonts[sourceDescriptor.name] is not None:\n for key, value in self.fonts[sourceDescriptor.name].lib.items():\n font.lib[key] = value\n if sourceDescriptor.copyGroups:\n if self.fonts[sourceDescriptor.name] is not None:\n sides = font.kerningGroupConversionRenameMaps.get('side1', {})\n sides.update(font.kerningGroupConversionRenameMaps.get('side2', {}))\n for key, value in self.fonts[sourceDescriptor.name].groups.items():\n if key not in sides:\n font.groups[key] = value\n if sourceDescriptor.copyFeatures:\n if self.fonts[sourceDescriptor.name] is not None:\n featuresText = self.fonts[sourceDescriptor.name].features.text\n font.features.text = featuresText\n # glyphs\n if glyphNames:\n selectedGlyphNames = glyphNames\n else:\n selectedGlyphNames = self.glyphNames\n # add the glyphnames to the font.lib['public.glyphOrder']\n if not 'public.glyphOrder' in font.lib.keys():\n font.lib['public.glyphOrder'] = selectedGlyphNames\n for glyphName in selectedGlyphNames:\n try:\n glyphMutator = self.getGlyphMutator(glyphName)\n if glyphMutator is None:\n continue\n except:\n self.problems.append(\"Could not make mutator for glyph %s %s\" % (glyphName, traceback.format_exc()))\n continue\n if glyphName in instanceDescriptor.glyphs.keys():\n # XXX this should be able to go now that we have full rule support.\n # reminder: this is what the glyphData can look like\n # 
{'instanceLocation': {'custom': 0.0, 'weight': 824.0},\n # 'masters': [{'font': 'master.Adobe VF Prototype.Master_0.0',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 0.0, 'weight': 0.0}},\n # {'font': 'master.Adobe VF Prototype.Master_1.1',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 0.0, 'weight': 368.0}},\n # {'font': 'master.Adobe VF Prototype.Master_2.2',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 0.0, 'weight': 1000.0}},\n # {'font': 'master.Adobe VF Prototype.Master_3.3',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 100.0, 'weight': 1000.0}},\n # {'font': 'master.Adobe VF Prototype.Master_0.4',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 100.0, 'weight': 0.0}},\n # {'font': 'master.Adobe VF Prototype.Master_4.5',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 100.0, 'weight': 368.0}}],\n # 'unicodes': [36]}\n glyphData = instanceDescriptor.glyphs[glyphName]\n else:\n glyphData = {}\n font.newGlyph(glyphName)\n font[glyphName].clear()\n if glyphData.get('mute', False):\n # mute this glyph, skip\n continue\n glyphInstanceLocation = glyphData.get(\"instanceLocation\", instanceDescriptor.location)\n glyphInstanceLocation = Location(glyphInstanceLocation)\n uniValues = []\n neutral = glyphMutator.get(())\n if neutral is not None:\n uniValues = neutral[0].unicodes\n else:\n neutralFont = self.getNeutralFont()\n if glyphName in neutralFont:\n uniValues = neutralFont[glyphName].unicodes\n glyphInstanceUnicodes = glyphData.get(\"unicodes\", uniValues)\n note = glyphData.get(\"note\")\n if note:\n font[glyphName] = note\n # XXXX phase out support for instance-specific masters\n # this should be handled by the rules system.\n masters = glyphData.get(\"masters\", None)\n if masters is not None:\n items = []\n for glyphMaster in masters:\n sourceGlyphFont = glyphMaster.get(\"font\")\n sourceGlyphName = glyphMaster.get(\"glyphName\", glyphName)\n m = 
self.fonts.get(sourceGlyphFont)\n if not sourceGlyphName in m:\n continue\n if hasattr(m[sourceGlyphName], \"toMathGlyph\"):\n sourceGlyph = m[sourceGlyphName].toMathGlyph()\n else:\n sourceGlyph = MathGlyph(m[sourceGlyphName])\n sourceGlyphLocation = glyphMaster.get(\"location\")\n items.append((Location(sourceGlyphLocation), sourceGlyph))\n bias, glyphMutator = self.getVariationModel(items, axes=self.serializedAxes, bias=self.newDefaultLocation())\n try:\n if not self.isAnisotropic(glyphInstanceLocation):\n glyphInstanceObject = glyphMutator.makeInstance(glyphInstanceLocation, bend=bend)\n else:\n # split anisotropic location into horizontal and vertical components\n horizontal, vertical = self.splitAnisotropic(glyphInstanceLocation)\n horizontalGlyphInstanceObject = glyphMutator.makeInstance(horizontal, bend=bend)\n verticalGlyphInstanceObject = glyphMutator.makeInstance(vertical, bend=bend)\n # merge them again\n glyphInstanceObject = (1,0)*horizontalGlyphInstanceObject + (0,1)*verticalGlyphInstanceObject\n except IndexError:\n # alignment problem with the data?\n continue\n font.newGlyph(glyphName)\n font[glyphName].clear()\n if self.roundGeometry:\n try:\n glyphInstanceObject = glyphInstanceObject.round()\n except AttributeError:\n pass\n try:\n # File \"/Users/erik/code/ufoProcessor/Lib/ufoProcessor/__init__.py\", line 649, in makeInstance\n # glyphInstanceObject.extractGlyph(font[glyphName], onlyGeometry=True)\n # File \"/Applications/RoboFont.app/Contents/Resources/lib/python3.6/fontMath/mathGlyph.py\", line 315, in extractGlyph\n # glyph.anchors = [dict(anchor) for anchor in self.anchors]\n # File \"/Applications/RoboFont.app/Contents/Resources/lib/python3.6/fontParts/base/base.py\", line 103, in __set__\n # raise FontPartsError(\"no setter for %r\" % self.name)\n # fontParts.base.errors.FontPartsError: no setter for 'anchors'\n if hasattr(font[glyphName], \"fromMathGlyph\"):\n font[glyphName].fromMathGlyph(glyphInstanceObject)\n else:\n 
glyphInstanceObject.extractGlyph(font[glyphName], onlyGeometry=True)\n except TypeError:\n # this causes ruled glyphs to end up in the wrong glyphname\n # but defcon2 objects don't support it\n pPen = font[glyphName].getPointPen()\n font[glyphName].clear()\n glyphInstanceObject.drawPoints(pPen)\n font[glyphName].width = glyphInstanceObject.width\n font[glyphName].unicodes = glyphInstanceUnicodes\n if doRules:\n resultNames = processRules(self.rules, loc, self.glyphNames)\n for oldName, newName in zip(self.glyphNames, resultNames):\n if oldName != newName:\n swapGlyphNames(font, oldName, newName)\n # copy the glyph lib?\n #for sourceDescriptor in self.sources:\n # if sourceDescriptor.copyLib:\n # pass\n # pass\n # store designspace location in the font.lib\n font.lib['designspace.location'] = list(instanceDescriptor.location.items())\n return font"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a instance of a font object with all the given subclasses", "response": "def _instantiateFont(self, path):\n \"\"\" Return a instance of a font object with all the given subclasses\"\"\"\n try:\n return self.fontClass(path,\n layerClass=self.layerClass,\n libClass=self.libClass,\n kerningClass=self.kerningClass,\n groupsClass=self.groupsClass,\n infoClass=self.infoClass,\n featuresClass=self.featuresClass,\n glyphClass=self.glyphClass,\n glyphContourClass=self.glyphContourClass,\n glyphPointClass=self.glyphPointClass,\n glyphComponentClass=self.glyphComponentClass,\n glyphAnchorClass=self.glyphAnchorClass)\n except TypeError:\n # if our fontClass doesnt support all the additional classes\n return self.fontClass(path)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef checkGlyphIsEmpty(glyph, allowWhiteSpace=True):\n whiteSpace = [ 0x9, # horizontal tab\n 0xa, # line feed\n 0xb, # vertical tab\n 0xc, # form feed\n 0xd, # carriage return\n 0x20, # space\n 0x85, # next 
line\n 0xa0, # nobreak space\n 0x1680, # ogham space mark\n 0x180e, # mongolian vowel separator\n 0x2000, # en quad\n 0x2001, # em quad\n 0x2003, # en space\n 0x2004, # three per em space\n 0x2005, # four per em space\n 0x2006, # six per em space\n 0x2007, # figure space\n 0x2008, # punctuation space\n 0x2009, # thin space\n 0x200a, # hair space\n 0x2028, # line separator\n 0x2029, # paragraph separator\n 0x202f, # narrow no break space\n 0x205f, # medium mathematical space\n 0x3000, # ideographic space\n ]\n emptyPen = EmptyPen()\n glyph.drawPoints(emptyPen)\n if emptyPen.isEmpty():\n # we're empty?\n if glyph.unicode in whiteSpace and allowWhiteSpace:\n # are we allowed to be?\n return False\n return True\n return False", "response": "Checks if a glyph is completely empty."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nconfiguring and build the ns - 3 code.", "response": "def configure_and_build(self, show_progress=True, optimized=True,\n skip_configuration=False):\n \"\"\"\n Configure and build the ns-3 code.\n\n Args:\n show_progress (bool): whether or not to display a progress bar\n during compilation.\n optimized (bool): whether to use an optimized build. 
If False, use\n a standard ./waf configure.\n skip_configuration (bool): whether to skip the configuration step,\n and only perform compilation.\n \"\"\"\n\n # Only configure if necessary\n if not skip_configuration:\n configuration_command = ['python', 'waf', 'configure', '--enable-examples',\n '--disable-gtk', '--disable-python']\n\n if optimized:\n configuration_command += ['--build-profile=optimized',\n '--out=build/optimized']\n\n # Check whether path points to a valid installation\n subprocess.call(configuration_command, cwd=self.path,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Build ns-3\n build_process = subprocess.Popen(['python', 'waf', 'build'], cwd=self.path,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n # Show a progress bar\n if show_progress:\n line_iterator = self.get_build_output(build_process)\n pbar = None\n try:\n [initial, total] = next(line_iterator)\n pbar = tqdm(line_iterator, initial=initial, total=total,\n unit='file', desc='Building ns-3', smoothing=0)\n for current, total in pbar:\n pbar.n = current\n except (StopIteration):\n if pbar is not None:\n pbar.n = pbar.total\n else: # Wait for the build to finish anyway\n build_process.communicate()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nparsing the output of the ns - 3 build process and yield the information that is needed to draw the progress bar.", "response": "def get_build_output(self, process):\n \"\"\"\n Parse the output of the ns-3 build process to extract the information\n that is needed to draw the progress bar.\n\n Args:\n process: the subprocess instance to listen to.\n \"\"\"\n\n while True:\n output = process.stdout.readline()\n if output == b'' and process.poll() is not None:\n if process.returncode > 0:\n raise Exception(\"Compilation ended with an error\"\n \".\\nSTDERR\\n%s\\nSTDOUT\\n%s\" %\n (process.stderr.read(),\n process.stdout.read()))\n return\n if output:\n # Parse the output to get current and total tasks\n 
# This assumes the progress displayed by waf is in the form\n # [current/total]\n matches = re.search(r'\\[\\s*(\\d+?)/(\\d+)\\].*',\n output.strip().decode('utf-8'))\n if matches is not None:\n yield [int(matches.group(1)), int(matches.group(2))]"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_available_parameters(self):\n\n # At the moment, we rely on regex to extract the list of available\n # parameters. This solution will break if the format of the output\n # changes, but this is the best option that is currently available.\n\n result = subprocess.check_output([self.script_executable,\n '--PrintHelp'], env=self.environment,\n cwd=self.path).decode('utf-8')\n\n # Isolate the list of parameters\n options = re.findall('.*Program\\s(?:Options|Arguments):'\n '(.*)General\\sArguments.*',\n result, re.DOTALL)\n\n global_options = subprocess.check_output([self.script_executable,\n '--PrintGlobals'],\n env=self.environment,\n cwd=self.path).decode('utf-8')\n\n # Get the single parameter names\n args = []\n if len(options):\n args += re.findall('.*--(.*?)[?::|=].*', options[0], re.MULTILINE)\n if len(global_options):\n args += [k for k in re.findall('.*--(.*?)[?::|=].*',\n global_options, re.MULTILINE) if k\n not in ['RngRun', 'RngSeed', 'SchedulerType',\n 'SimulatorImplementationType', 'ChecksumEnabled']]\n return sorted(args)", "response": "Get the list of parameters that are available by the script."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef run_simulations(self, parameter_list, data_folder):\n\n for idx, parameter in enumerate(parameter_list):\n\n current_result = {\n 'params': {},\n 'meta': {}\n }\n current_result['params'].update(parameter)\n\n command = [self.script_executable] + ['--%s=%s' % (param, value)\n for param, value in\n parameter.items()]\n\n # Run from dedicated temporary folder\n current_result['meta']['id'] = str(uuid.uuid4())\n 
temp_dir = os.path.join(data_folder, current_result['meta']['id'])\n os.makedirs(temp_dir)\n\n start = time.time() # Time execution\n stdout_file_path = os.path.join(temp_dir, 'stdout')\n stderr_file_path = os.path.join(temp_dir, 'stderr')\n with open(stdout_file_path, 'w') as stdout_file, open(\n stderr_file_path, 'w') as stderr_file:\n return_code = subprocess.call(command, cwd=temp_dir,\n env=self.environment,\n stdout=stdout_file,\n stderr=stderr_file)\n end = time.time() # Time execution\n\n if return_code > 0:\n complete_command = [self.script]\n complete_command.extend(command[1:])\n complete_command = \"python waf --run \\\"%s\\\"\" % (\n ' '.join(complete_command))\n\n with open(stdout_file_path, 'r') as stdout_file, open(\n stderr_file_path, 'r') as stderr_file:\n raise Exception(('Simulation exited with an error.\\n'\n 'Params: %s\\n'\n '\\nStderr: %s\\n'\n 'Stdout: %s\\n'\n 'Use this command to reproduce:\\n'\n '%s'\n % (parameter, stderr_file.read(),\n stdout_file.read(), complete_command)))\n\n current_result['meta']['elapsed_time'] = end-start\n\n yield current_result", "response": "Runs several simulations using a certain combination of parameters. 
Yields results as simulations are completed."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncreating a list of all parameter combinations from a dictionary specifying the desired parameter values as lists.", "response": "def list_param_combinations(param_ranges):\n \"\"\"\n Create a list of all parameter combinations from a dictionary specifying\n desired parameter values as lists.\n\n Example:\n\n >>> param_ranges = {'a': [1], 'b': [2, 3]}\n >>> list_param_combinations(param_ranges)\n [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]\n\n Additionally, this function is robust in case values are not lists:\n\n >>> param_ranges = {'a': 1, 'b': [2, 3]}\n >>> list_param_combinations(param_ranges)\n [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]\n\n \"\"\"\n # Convert non-list values to single-element lists\n # This is required to make sure product work.\n for key in param_ranges:\n if not isinstance(param_ranges[key], list):\n param_ranges[key] = [param_ranges[key]]\n\n return [dict(zip(param_ranges, v)) for v in\n product(*param_ranges.values())]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_command_from_result(script, result, debug=False):\n if not debug:\n command = \"python waf --run \\\"\" + script + \" \" + \" \".join(\n ['--%s=%s' % (param, value) for param, value in\n result['params'].items()]) + \"\\\"\"\n else:\n command = \"python waf --run \" + script + \" --command-template=\\\"\" +\\\n \"gdb --args %s \" + \" \".join(['--%s=%s' % (param, value) for\n param, value in\n result['params'].items()]) + \"\\\"\"\n return command", "response": "Returns the command that is needed to obtain a certain result."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ntry and automatically convert strings formatted as tables into nested objects.", "response": "def automatic_parser(result, dtypes={}, converters={}):\n \"\"\"\n Try and automatically convert 
strings formatted as tables into nested\n list structures.\n\n Under the hood, this function essentially applies the genfromtxt function\n to all files in the output, and passes it the additional kwargs.\n\n Args:\n result (dict): the result to parse.\n dtypes (dict): a dictionary containing the dtype specification to perform\n parsing for each available filename. See the numpy genfromtxt\n documentation for more details on how to format these.\n \"\"\"\n np.seterr(all='raise')\n parsed = {}\n\n for filename, contents in result['output'].items():\n if dtypes.get(filename) is None:\n dtypes[filename] = None\n if converters.get(filename) is None:\n converters[filename] = None\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n parsed[filename] = np.genfromtxt(io.StringIO(contents),\n dtype=dtypes[filename],\n converters=converters[filename]\n ).tolist()\n\n return parsed"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef new(cls, ns_path, script, campaign_dir, runner_type='Auto',\n overwrite=False, optimized=True, check_repo=True):\n \"\"\"\n Create a new campaign from an ns-3 installation and a campaign\n directory.\n\n This method will create a DatabaseManager, which will install a\n database in the specified campaign_dir. If a database is already\n available at the ns_path described in the specified campaign_dir and\n its configuration matches config, this instance is used instead. If the\n overwrite argument is set to True instead, the specified directory is\n wiped and a new campaign is created in its place.\n\n Furthermore, this method will initialize a SimulationRunner, of type\n specified by the runner_type parameter, which will be locked on the\n ns-3 installation at ns_path and set up to run the desired script.\n\n Finally, note that creation of a campaign requires a git repository to\n be initialized at the specified ns_path. 
This will allow SEM to save\n the commit at which the simulations are run, enforce reproducibility\n and avoid mixing results coming from different versions of ns-3 and its\n libraries.\n\n Args:\n ns_path (str): path to the ns-3 installation to employ in this\n campaign.\n script (str): ns-3 script that will be executed to run simulations.\n campaign_dir (str): path to the directory in which to save the\n simulation campaign database.\n runner_type (str): implementation of the SimulationRunner to use.\n Value can be: SimulationRunner (for running sequential\n simulations locally), ParallelRunner (for running parallel\n simulations locally), GridRunner (for running simulations using\n a DRMAA-compatible parallel task scheduler). Use Auto to\n automatically pick the best runner.\n overwrite (bool): whether to overwrite already existing\n campaign_dir folders. This deletes the directory if and only if\n it only contains files that were detected to be created by sem.\n optimized (bool): whether to configure the runner to employ an\n optimized ns-3 build.\n \"\"\"\n # Convert paths to be absolute\n ns_path = os.path.abspath(ns_path)\n campaign_dir = os.path.abspath(campaign_dir)\n\n # Verify if the specified campaign is already available\n if Path(campaign_dir).exists() and not overwrite:\n # Try loading\n manager = CampaignManager.load(campaign_dir, ns_path,\n runner_type=runner_type,\n optimized=optimized,\n check_repo=check_repo)\n\n if manager.db.get_script() == script:\n return manager\n else:\n del manager\n\n # Initialize runner\n runner = CampaignManager.create_runner(ns_path, script,\n runner_type=runner_type,\n optimized=optimized)\n\n # Get list of parameters to save in the DB\n params = runner.get_available_parameters()\n\n # Get current commit\n commit = \"\"\n if check_repo:\n from git import Repo, exc\n commit = Repo(ns_path).head.commit.hexsha\n\n # Create a database manager from the configuration\n db = DatabaseManager.new(script=script,\n 
params=params,\n commit=commit,\n campaign_dir=campaign_dir,\n overwrite=overwrite)\n\n return cls(db, runner, check_repo)", "response": "Creates a new instance of the type\n with the given ns - path script and campaign_dir."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nload an existing simulation campaign from a directory containing a ns - 3 installation.", "response": "def load(cls, campaign_dir, ns_path=None, runner_type='Auto',\n optimized=True, check_repo=True):\n \"\"\"\n Load an existing simulation campaign.\n\n Note that specifying an ns-3 installation is not compulsory when using\n this method: existing results will be available, but in order to run\n additional simulations it will be necessary to specify a\n SimulationRunner object, and assign it to the CampaignManager.\n\n Args:\n campaign_dir (str): path to the directory in which to save the\n simulation campaign database.\n ns_path (str): path to the ns-3 installation to employ in this\n campaign.\n runner_type (str): implementation of the SimulationRunner to use.\n Value can be: SimulationRunner (for running sequential\n simulations locally), ParallelRunner (for running parallel\n simulations locally), GridRunner (for running simulations using\n a DRMAA-compatible parallel task scheduler).\n optimized (bool): whether to configure the runner to employ an\n optimized ns-3 build.\n \"\"\"\n # Convert paths to be absolute\n if ns_path is not None:\n ns_path = os.path.abspath(ns_path)\n campaign_dir = os.path.abspath(campaign_dir)\n\n # Read the existing configuration into the new DatabaseManager\n db = DatabaseManager.load(campaign_dir)\n script = db.get_script()\n\n runner = None\n if ns_path is not None:\n runner = CampaignManager.create_runner(ns_path, script,\n runner_type, optimized)\n\n return cls(db, runner, check_repo)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef create_runner(ns_path, script, 
runner_type='Auto',\n optimized=True):\n \"\"\"\n Create a SimulationRunner from a string containing the desired\n class implementation, and return it.\n\n Args:\n ns_path (str): path to the ns-3 installation to employ in this\n SimulationRunner.\n script (str): ns-3 script that will be executed to run simulations.\n runner_type (str): implementation of the SimulationRunner to use.\n Value can be: SimulationRunner (for running sequential\n simulations locally), ParallelRunner (for running parallel\n simulations locally), GridRunner (for running simulations using\n a DRMAA-compatible parallel task scheduler). If Auto,\n automatically pick the best available runner (GridRunner if\n DRMAA is available, ParallelRunner otherwise).\n optimized (bool): whether to configure the runner to employ an\n optimized ns-3 build.\n \"\"\"\n # locals() contains a dictionary pairing class names with class\n # objects: we can create the object using the desired class starting\n # from its name.\n if runner_type == 'Auto' and DRMAA_AVAILABLE:\n runner_type = 'GridRunner'\n elif runner_type == 'Auto':\n runner_type = 'ParallelRunner'\n\n return locals().get(runner_type,\n globals().get(runner_type))(\n ns_path, script, optimized=optimized)", "response": "Create a SimulationRunner from a string containing the desired\n class implementation and return it."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nruns several simulations specified by a list of parameter combinations.", "response": "def run_simulations(self, param_list, show_progress=True):\n \"\"\"\n Run several simulations specified by a list of parameter combinations.\n\n Note: this function does not verify whether we already have the\n required simulations in the database - it just runs all the parameter\n combinations that are specified in the list.\n\n Args:\n param_list (list): list of parameter combinations to execute.\n Items of this list are dictionaries, with one key for each\n 
parameter, and a value specifying the parameter value (which\n can be either a string or a number).\n show_progress (bool): whether or not to show a progress bar with\n percentage and expected remaining time.\n \"\"\"\n\n # Make sure we have a runner to run simulations with.\n # This can happen in case the simulation campaign is loaded and not\n # created from scratch.\n if self.runner is None:\n raise Exception(\"No runner was ever specified\"\n \" for this CampaignManager.\")\n\n # Return if the list is empty\n if param_list == []:\n return\n\n # Check all parameter combinations fully specify the desired simulation\n desired_params = self.db.get_params()\n for p in param_list:\n # Besides the parameters that were actually passed, we add the ones\n # that are always available in every script\n passed = list(p.keys())\n available = ['RngRun'] + desired_params\n if set(passed) != set(available):\n raise ValueError(\"Specified parameter combination does not \"\n \"match the supported parameters:\\n\"\n \"Passed: %s\\nSupported: %s\" %\n (sorted(passed), sorted(available)))\n\n # Check that the current repo commit corresponds to the one specified\n # in the campaign\n if self.check_repo:\n self.check_repo_ok()\n\n # Build ns-3 before running any simulations\n # At this point, we can assume the project was already configured\n self.runner.configure_and_build(skip_configuration=True)\n\n # Shuffle simulations\n # This mixes up long and short simulations, and gives better time\n # estimates.\n shuffle(param_list)\n\n # Offload simulation execution to self.runner\n # Note that this only creates a generator for the results, no\n # computation is performed on this line.\n results = self.runner.run_simulations(param_list,\n self.db.get_data_dir())\n\n # Wrap the result generator in the progress bar generator.\n if show_progress:\n result_generator = tqdm(results, total=len(param_list),\n unit='simulation',\n desc='Running simulations')\n else:\n result_generator = 
results\n\n # Insert result object in db. Using the generator here ensures we\n # save results as they are finalized by the SimulationRunner, and\n # that they are kept even if execution is terminated abruptly by\n # crashes or by a KeyboardInterrupt.\n for result in result_generator:\n self.db.insert_result(result)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_missing_simulations(self, param_list, runs=None):\n\n params_to_simulate = []\n\n if runs is not None: # Get next available runs from the database\n next_runs = self.db.get_next_rngruns()\n available_params = [r['params'] for r in self.db.get_results()]\n for param_comb in param_list:\n # Count how many param combinations we found, and remove them\n # from the list of available_params for faster searching in the\n # future\n needed_runs = runs\n for i, p in enumerate(available_params):\n if param_comb == {k: p[k] for k in p.keys() if k != \"RngRun\"}:\n needed_runs -= 1\n new_param_combs = []\n for needed_run in range(needed_runs):\n # Here it's important that we make copies of the\n # dictionaries, so that if we modify one we don't modify\n # the others. 
This is necessary because after this step,\n # typically, we will add the RngRun key which must be\n # different for each copy.\n new_param = deepcopy(param_comb)\n new_param['RngRun'] = next(next_runs)\n new_param_combs += [new_param]\n params_to_simulate += new_param_combs\n else:\n for param_comb in param_list:\n if not self.db.get_results(param_comb):\n params_to_simulate += [param_comb]\n\n return params_to_simulate", "response": "Returns a list of the simulations among the required ones that are not available in the database."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef run_missing_simulations(self, param_list, runs=None):\n # If we are passed a dictionary, we need to expand this\n if isinstance(param_list, dict):\n param_list = list_param_combinations(param_list)\n\n # If we are passed a list already, just run the missing simulations\n self.run_simulations(\n self.get_missing_simulations(param_list, runs))", "response": "This function runs the missing simulations from the parameter list that are not yet available in the database."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_results_as_numpy_array(self, parameter_space,\n result_parsing_function, runs):\n \"\"\"\n Return the results relative to the desired parameter space in the form\n of a numpy array.\n\n Args:\n parameter_space (dict): dictionary containing\n parameter/list-of-values pairs.\n result_parsing_function (function): user-defined function, taking a\n result dictionary as argument, that can be used to parse the\n result files and return a list of values.\n runs (int): number of runs to gather for each parameter\n combination.\n \"\"\"\n return np.array(self.get_space(self.db.get_complete_results(), {},\n parameter_space, runs,\n result_parsing_function))", "response": "Returns the results of the a\n as a numpy array."} {"SOURCE": "codesearchnet", "instruction": "Can you 
tell what is the following Python 3 function doing\ndef save_to_mat_file(self, parameter_space,\n result_parsing_function,\n filename, runs):\n \"\"\"\n Return the results relative to the desired parameter space in the form\n of a .mat file.\n\n Args:\n parameter_space (dict): dictionary containing\n parameter/list-of-values pairs.\n result_parsing_function (function): user-defined function, taking a\n result dictionary as argument, that can be used to parse the\n result files and return a list of values.\n filename (path): name of output .mat file.\n runs (int): number of runs to gather for each parameter\n combination.\n \"\"\"\n\n # Make sure all values are lists\n for key in parameter_space:\n if not isinstance(parameter_space[key], list):\n parameter_space[key] = [parameter_space[key]]\n\n # Add a dimension label for each non-singular dimension\n dimension_labels = [{key: str(parameter_space[key])} for key in\n parameter_space.keys() if len(parameter_space[key])\n > 1] + [{'runs': range(runs)}]\n\n # Create a list of the parameter names\n\n return savemat(\n filename,\n {'results':\n self.get_results_as_numpy_array(parameter_space,\n result_parsing_function,\n runs=runs),\n 'dimension_labels': dimension_labels})", "response": "Save the results of the current run to a. 
mat file."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nsaves results to a numpy array file format.", "response": "def save_to_npy_file(self, parameter_space,\n result_parsing_function,\n filename, runs):\n \"\"\"\n Save results to a numpy array file format.\n \"\"\"\n np.save(filename, self.get_results_as_numpy_array(\n parameter_space, result_parsing_function, runs=runs))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsaving results to a folder structure.", "response": "def save_to_folders(self, parameter_space, folder_name, runs):\n \"\"\"\n Save results to a folder structure.\n \"\"\"\n self.space_to_folders(self.db.get_results(), {}, parameter_space, runs,\n folder_name)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef space_to_folders(self, current_result_list, current_query, param_space,\n runs, current_directory):\n \"\"\"\n Convert a parameter space specification to a directory tree with a\n nested structure.\n \"\"\"\n # Base case: we iterate over the runs and copy files in the final\n # directory.\n if not param_space:\n for run, r in enumerate(current_result_list[:runs]):\n files = self.db.get_result_files(r)\n new_dir = os.path.join(current_directory, \"run=%s\" % run)\n os.makedirs(new_dir, exist_ok=True)\n for filename, filepath in files.items():\n shutil.copyfile(filepath, os.path.join(new_dir, filename))\n return\n\n [key, value] = list(param_space.items())[0]\n # Iterate over dictionary values\n for v in value:\n next_query = deepcopy(current_query)\n temp_query = deepcopy(current_query)\n\n # For each list, recur 'fixing' that dimension.\n next_query[key] = v # Update query\n\n # Create folder\n folder_name = (\"%s=%s\" % (key, v)).replace('/', '_')\n new_dir = os.path.join(current_directory, folder_name)\n os.makedirs(new_dir, exist_ok=True)\n\n next_param_space = deepcopy(param_space)\n 
del(next_param_space[key])\n\n temp_query[key] = v\n temp_result_list = [r for r in current_result_list if\n self.satisfies_query(r, temp_query)]\n\n self.space_to_folders(temp_result_list, next_query,\n next_param_space, runs, new_dir)", "response": "Convert a parameter space specification to a nested structure with a nested structure."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the results of the user - defined function in the parameter space in the form of an xarray data structure.", "response": "def get_results_as_xarray(self, parameter_space,\n result_parsing_function,\n output_labels, runs):\n \"\"\"\n Return the results relative to the desired parameter space in the form\n of an xarray data structure.\n\n Args:\n parameter_space (dict): The space of parameters to export.\n result_parsing_function (function): user-defined function, taking a\n result dictionary as argument, that can be used to parse the\n result files and return a list of values.\n output_labels (list): a list of labels to apply to the results\n dimensions, output by the result_parsing_function.\n runs (int): the number of runs to export for each parameter\n combination.\n \"\"\"\n np_array = np.array(\n self.get_space(\n self.db.get_complete_results(), {},\n collections.OrderedDict([(k, v) for k, v in\n parameter_space.items()]),\n runs, result_parsing_function))\n\n # Create a parameter space only containing the variable parameters\n clean_parameter_space = collections.OrderedDict(\n [(k, v) for k, v in parameter_space.items()])\n\n clean_parameter_space['runs'] = range(runs)\n\n if isinstance(output_labels, list):\n clean_parameter_space['metrics'] = output_labels\n\n xr_array = xr.DataArray(np_array, coords=clean_parameter_space,\n dims=list(clean_parameter_space.keys()))\n\n return xr_array"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_space(self, current_result_list, 
current_query, param_space, runs,\n result_parsing_function):\n \"\"\"\n Convert a parameter space specification to a nested array structure\n representing the space. In other words, if the parameter space is::\n\n param_space = {\n 'a': [1, 2],\n 'b': [3, 4]\n }\n\n the function will return a structure like the following::\n\n [\n [\n {'a': 1, 'b': 3},\n {'a': 1, 'b': 4}\n ],\n [\n {'a': 2, 'b': 3},\n {'a': 2, 'b': 4}\n ]\n ]\n\n where the first dimension represents a, and the second dimension\n represents b. This nested-array structure can then be easily converted\n to a numpy array via np.array().\n\n Args:\n current_query (dict): the query to apply to the structure.\n param_space (dict): representation of the parameter space.\n result_parsing_function (function): user-defined function to call\n on results, typically used to parse data and outputting\n metrics.\n runs (int): the number of runs to query for each parameter\n combination.\n \"\"\"\n if result_parsing_function is None:\n result_parsing_function = CampaignManager.files_in_dictionary\n # Note that this function operates recursively.\n\n # Base case\n if not param_space:\n results = [r for r in current_result_list if\n self.satisfies_query(r, current_query)]\n parsed = []\n for r in results[:runs]:\n parsed.append(result_parsing_function(r))\n\n return parsed\n\n space = []\n [key, value] = list(param_space.items())[0]\n # Iterate over dictionary values\n for v in value:\n next_query = deepcopy(current_query)\n temp_query = deepcopy(current_query)\n # For each list, recur 'fixing' that dimension.\n next_query[key] = v\n next_param_space = deepcopy(param_space)\n del(next_param_space[key])\n temp_query[key] = v\n temp_result_list = [r for r in current_result_list if\n self.satisfies_query(r, temp_query)]\n space.append(self.get_space(temp_result_list, next_query,\n next_param_space, runs,\n result_parsing_function))\n return space", "response": "This function returns a nested array structure 
representing the parameter space."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef check_repo_ok(self):\n from git import Repo, exc\n # Check that git is at the expected commit and that the repo is not\n # dirty\n if self.runner is not None:\n path = self.runner.path\n try:\n repo = Repo(path)\n except(exc.InvalidGitRepositoryError):\n raise Exception(\"No git repository detected.\\nIn order to \"\n \"use SEM and its reproducibility enforcing \"\n \"features, please create a git repository at \"\n \"the root of your ns-3 project.\")\n current_commit = repo.head.commit.hexsha\n campaign_commit = self.db.get_commit()\n\n if repo.is_dirty(untracked_files=True):\n raise Exception(\"ns-3 repository is not clean\")\n\n if current_commit != campaign_commit:\n raise Exception(\"ns-3 repository is on a different commit \"\n \"from the one specified in the campaign\")", "response": "Check that the ns - 3 repository is clean and that the commit of the one - item repository is the same as the campaign s HEAD commit."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nviewing results of simulations.", "response": "def view(results_dir, result_id, hide_simulation_output, parameters, no_pager):\n \"\"\"\n View results of simulations.\n \"\"\"\n\n campaign = sem.CampaignManager.load(results_dir)\n\n # Pick the most appropriate function based on the level of detail we want\n if hide_simulation_output:\n get_results_function = campaign.db.get_results\n else:\n get_results_function = campaign.db.get_complete_results\n\n # If a result id was specified, just query for that result\n if result_id:\n output = '\\n\\n\\n'.join([pprint.pformat(item) for item in\n get_results_function(result_id=result_id)])\n else:\n\n [params, defaults] = zip(*get_params_and_defaults(\n campaign.db.get_params(), campaign.db))\n\n if not parameters:\n # Convert to string\n string_defaults = list()\n for idx, d in 
enumerate(defaults):\n string_defaults.append(str(d))\n\n script_params = query_parameters(params, string_defaults)\n else:\n script_params = import_parameters_from_file(parameters)\n\n # Perform the search\n output = '\\n\\n\\n'.join([pprint.pformat(item) for item in\n get_results_function(script_params)])\n\n # Print the results\n if no_pager:\n click.echo(output)\n else:\n click.echo_via_pager(output)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef command(results_dir, result_id):\n campaign = sem.CampaignManager.load(results_dir)\n\n result = campaign.db.get_results(result_id=result_id)[0]\n\n click.echo(\"Simulation command:\")\n click.echo(sem.utils.get_command_from_result(campaign.db.get_script(),\n result))\n click.echo(\"Debug command:\")\n click.echo(sem.utils.get_command_from_result(campaign.db.get_script(),\n result,\n debug=True))", "response": "Print the command that needs to be used to reproduce a result."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef export(results_dir, filename, do_not_try_parsing, parameters):\n\n # Get the extension\n _, extension = os.path.splitext(filename)\n\n campaign = sem.CampaignManager.load(results_dir)\n\n [params, defaults] = zip(*get_params_and_defaults(campaign.db.get_params(),\n campaign.db))\n\n if do_not_try_parsing:\n parsing_function = None\n else:\n parsing_function = sem.utils.automatic_parser\n\n if not parameters:\n # Convert to string\n string_defaults = list()\n for idx, d in enumerate(defaults):\n string_defaults.append(str(d))\n parameter_query = query_parameters(params, string_defaults)\n else:\n # Import specified parameter file\n parameter_query = import_parameters_from_file(parameters)\n\n if extension == \".mat\":\n campaign.save_to_mat_file(parameter_query, parsing_function, filename,\n runs=click.prompt(\"Runs\", type=int))\n elif extension == \".npy\":\n campaign.save_to_npy_file(parameter_query, 
parsing_function, filename,\n runs=click.prompt(\"Runs\", type=int))\n elif extension == \"\":\n campaign.save_to_folders(parameter_query, filename,\n runs=click.prompt(\"Runs\", type=int))\n else: # Unrecognized format\n raise ValueError(\"Format not recognized\")", "response": "Export the results to a file."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nmerge multiple results folder into one by copying the results over to a new folder.", "response": "def merge(move, output_dir, sources):\n \"\"\"\n Merge multiple results folder into one, by copying the results over to a new folder.\n\n For a faster operation (which on the other hand destroys the campaign data\n if interrupted), the move option can be used to directly move results to\n the new folder.\n \"\"\"\n # Get paths for all campaign JSONS\n jsons = []\n for s in sources:\n filename = \"%s.json\" % os.path.split(s)[1]\n jsons += [os.path.join(s, filename)]\n\n # Check that the configuration for all campaigns is the same\n reference_config = TinyDB(jsons[0]).table('config')\n for j in jsons[1:]:\n for i, j in zip(reference_config.all(), TinyDB(j).table('config').all()):\n assert i == j\n\n # Create folders for new results directory\n filename = \"%s.json\" % os.path.split(output_dir)[1]\n output_json = os.path.join(output_dir, filename)\n output_data = os.path.join(output_dir, 'data')\n os.makedirs(output_data)\n\n # Create new database\n db = TinyDB(output_json)\n db.table('config').insert_multiple(reference_config.all())\n\n # Import results from all databases to the new JSON file\n for s in sources:\n filename = \"%s.json\" % os.path.split(s)[1]\n current_db = TinyDB(os.path.join(s, filename))\n db.table('results').insert_multiple(current_db.table('results').all())\n\n # Copy or move results to new data folder\n for s in sources:\n for r in glob.glob(os.path.join(s, 'data/*')):\n basename = os.path.basename(r)\n if move:\n shutil.move(r, 
os.path.join(output_data, basename))\n else:\n shutil.copytree(r, os.path.join(output_data, basename))\n\n if move:\n for s in sources:\n shutil.rmtree(os.path.join(s, 'data/*'))\n shutil.rmtree(os.path.join(s, \"%s.json\" % os.path.split(s)[1]))\n shutil.rmtree(s)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_params_and_defaults(param_list, db):\n return [[p, d] for p, d in db.get_all_values_of_all_params().items()]", "response": "Returns a list of parameter pairs from the database."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef query_parameters(param_list, defaults=None):\n\n script_params = collections.OrderedDict([k, []] for k in param_list)\n\n for param, default in zip(list(script_params.keys()), defaults):\n user_input = click.prompt(\"%s\" % param, default=default)\n script_params[param] = ast.literal_eval(user_input)\n\n return script_params", "response": "Ask the user for parameters."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ntrying importing a parameter dictionary from file.", "response": "def import_parameters_from_file(parameters_file):\n \"\"\"\n Try importing a parameter dictionary from file.\n\n We expect values in parameters_file to be defined as follows:\n param1: value1\n param2: [value2, value3]\n \"\"\"\n params = {}\n\n with open(parameters_file, 'r') as f:\n matches = re.findall('(.*): (.*)', f.read())\n\n for m in matches:\n params[m[0]] = ast.literal_eval(m[1])\n\n return params"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nlaunch a single simulation using the specified parameter list.", "response": "def launch_simulation(self, parameter):\n \"\"\"\n Launch a single simulation, using SimulationRunner's facilities.\n\n This function is used by ParallelRunner's run_simulations to map\n simulation running over the 
parameter list.\n\n Args:\n parameter (dict): the parameter combination to simulate.\n \"\"\"\n return next(SimulationRunner.run_simulations(self, [parameter],\n self.data_folder))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\naliasing of helper. string. serialization. json. stringify", "response": "def stringify(self, obj, beautify=False, raise_exception=False):\n \"\"\"Alias of helper.string.serialization.json.stringify\"\"\"\n\n return self.helper.string.serialization.json.stringify(\n obj=obj,\n beautify=beautify,\n raise_exception=raise_exception)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nalias of helper. string. serialization. json. parse", "response": "def parse(self, text, encoding='utf8', raise_exception=False):\n \"\"\"Alias of helper.string.serialization.json.parse\"\"\"\n\n return self.helper.string.serialization.json.parse(\n text=text,\n encoding=encoding,\n raise_exception=raise_exception)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ninitializes a new instance of the class with the given script commit and parameters.", "response": "def new(cls, script, commit, params, campaign_dir, overwrite=False):\n \"\"\"\n Initialize a new class instance with a set configuration and filename.\n\n The created database has the same name of the campaign directory.\n\n Args:\n script (str): the ns-3 name of the script that will be used in this\n campaign;\n commit (str): the commit of the ns-3 installation that is used to\n run the simulations.\n params (list): a list of the parameters that can be used on the\n script.\n campaign_dir (str): The path of the file where to save the DB.\n overwrite (bool): Whether or not existing directories should be\n overwritten.\n\n \"\"\"\n\n # We only accept absolute paths\n if not Path(campaign_dir).is_absolute():\n raise ValueError(\"Path is not absolute\")\n\n # Make sure the directory does not exist already\n if 
Path(campaign_dir).exists() and not overwrite:\n raise FileExistsError(\"The specified directory already exists\")\n elif Path(campaign_dir).exists() and overwrite:\n # Verify we are not deleting files belonging to the user\n campaign_dir_name = os.path.basename(campaign_dir)\n folder_contents = set(os.listdir(campaign_dir))\n allowed_files = set(\n ['data', '%s.json' % campaign_dir_name] +\n # Allow hidden files (like .DS_STORE in macos)\n [os.path.basename(os.path.normpath(f)) for f in\n glob.glob(os.path.join(campaign_dir, \".*\"))])\n\n if(not folder_contents.issubset(allowed_files)):\n raise ValueError(\"The specified directory cannot be overwritten\"\n \" because it contains user files.\")\n # This operation destroys data.\n shutil.rmtree(campaign_dir)\n\n # Create the directory and database file in it\n # The indent and separators ensure the database is human readable.\n os.makedirs(campaign_dir)\n tinydb = TinyDB(os.path.join(campaign_dir, \"%s.json\" %\n os.path.basename(campaign_dir)))\n\n # Save the configuration in the database\n config = {\n 'script': script,\n 'commit': commit,\n 'params': sorted(params)\n }\n tinydb.table('config').insert(config)\n\n return cls(tinydb, campaign_dir)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ninitializing from an existing database.", "response": "def load(cls, campaign_dir):\n \"\"\"\n Initialize from an existing database.\n\n It is assumed that the database json file has the same name as its\n containing folder.\n\n Args:\n campaign_dir (str): The path to the campaign directory.\n \"\"\"\n\n # We only accept absolute paths\n if not Path(campaign_dir).is_absolute():\n raise ValueError(\"Path is not absolute\")\n\n # Verify file exists\n if not Path(campaign_dir).exists():\n raise ValueError(\"Directory does not exist\")\n\n # Extract filename from campaign dir\n filename = \"%s.json\" % os.path.split(campaign_dir)[1]\n filepath = os.path.join(campaign_dir, filename)\n\n try:\n # 
Read TinyDB instance from file\n tinydb = TinyDB(filepath)\n\n # Make sure the configuration is a valid dictionary\n assert set(\n tinydb.table('config').all()[0].keys()) == set(['script',\n 'params',\n 'commit'])\n except:\n # Remove the database instance created by tinydb\n os.remove(filepath)\n raise ValueError(\"Specified campaign directory seems corrupt\")\n\n return cls(tinydb, campaign_dir)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nyield the next RngRun values that can be used in this campaign.", "response": "def get_next_rngruns(self):\n \"\"\"\n Yield the next RngRun values that can be used in this campaign.\n \"\"\"\n available_runs = [result['params']['RngRun'] for result in\n self.get_results()]\n yield from DatabaseManager.get_next_values(available_runs)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ninserting a new result in the database.", "response": "def insert_result(self, result):\n \"\"\"\n Insert a new result in the database.\n\n This function also verifies that the result dictionaries saved in the\n database have the following structure (with {'a': 1} representing a\n dictionary, 'a' a key and 1 its value)::\n\n {\n 'params': {\n 'param1': value1,\n 'param2': value2,\n ...\n 'RngRun': value3\n },\n 'meta': {\n 'elapsed_time': value4,\n 'id': value5\n }\n }\n\n Where elapsed time is a float representing the seconds the simulation\n execution took, and id is a UUID uniquely identifying the result, and\n which is used to locate the output files in the campaign_dir/data\n folder.\n \"\"\"\n\n # This dictionary serves as a model for how the keys in the newly\n # inserted result should be structured.\n example_result = {\n 'params': {k: ['...'] for k in self.get_params() + ['RngRun']},\n 'meta': {k: ['...'] for k in ['elapsed_time', 'id']},\n }\n\n # Verify result format is correct\n if not(DatabaseManager.have_same_structure(result, example_result)):\n raise ValueError(\n 
'%s:\\nExpected: %s\\nGot: %s' % (\n \"Result dictionary does not correspond to database format\",\n pformat(example_result, depth=1),\n pformat(result, depth=1)))\n\n # Insert result\n self.db.table('results').insert(result)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_results(self, params=None, result_id=None):\n\n # In this case, return all results\n # A cast to dict is necessary, since self.db.table() contains TinyDB's\n # Document object (which is simply a wrapper for a dictionary, thus the\n # simple cast).\n if result_id is not None:\n return [dict(i) for i in self.db.table('results').all() if\n i['meta']['id'] == result_id]\n\n if params is None:\n return [dict(i) for i in self.db.table('results').all()]\n\n # Verify parameter format is correct\n all_params = set(['RngRun'] + self.get_params())\n param_subset = set(params.keys())\n if (not all_params.issuperset(param_subset)):\n raise ValueError(\n '%s:\\nParameters: %s\\nQuery: %s' % (\n 'Specified parameter keys do not match database format',\n all_params,\n param_subset))\n\n # Convert values that are not lists into lists to later perform\n # iteration over values more naturally. 
Perform this on a new\n # dictionary not to modify the original copy.\n query_params = {}\n for key in params:\n if not isinstance(params[key], list):\n query_params[key] = [params[key]]\n else:\n query_params[key] = params[key]\n\n # Handle case where query params has no keys\n if not query_params.keys():\n return [dict(i) for i in self.db.table('results').all()]\n\n # Create the TinyDB query\n # In the docstring example above, this is equivalent to:\n # AND(OR(param1 == value1), OR(param2 == value2, param2 == value3))\n query = reduce(and_, [reduce(or_, [\n where('params')[key] == v for v in value]) for key, value in\n query_params.items()])\n\n return [dict(i) for i in self.db.table('results').search(query)]", "response": "Returns all the results available from the database that fulfill some parameter combinations."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning a dictionary containing filename values for each output file associated with an id.", "response": "def get_result_files(self, result):\n \"\"\"\n Return a dictionary containing filename: filepath values for each\n output file associated with an id.\n\n Result can be either a result dictionary (e.g., obtained with the\n get_results() method) or a result id.\n \"\"\"\n if isinstance(result, dict):\n result_id = result['meta']['id']\n else: # Should already be a string containing the id\n result_id = result\n\n result_data_dir = os.path.join(self.get_data_dir(), result_id)\n\n filenames = next(os.walk(result_data_dir))[2]\n\n filename_path_pairs = [\n (f, os.path.join(self.get_data_dir(), result_id, f))\n for f in filenames]\n\n return {k: v for k, v in filename_path_pairs}"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_complete_results(self, params=None, result_id=None):\n\n if result_id is not None:\n results = deepcopy(self.get_results(result_id=result_id))\n else:\n results = 
deepcopy(self.get_results(params))\n\n for r in results:\n r['output'] = {}\n available_files = self.get_result_files(r['meta']['id'])\n for name, filepath in available_files.items():\n with open(filepath, 'r') as file_contents:\n r['output'][name] = file_contents.read()\n\n return results", "response": "Returns the results of the get_results function."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nremoving all results from the database.", "response": "def wipe_results(self):\n \"\"\"\n Remove all results from the database.\n\n This also removes all output files, and cannot be undone.\n \"\"\"\n # Clean results table\n self.db.purge_table('results')\n\n # Get rid of contents of data dir\n map(shutil.rmtree, glob.glob(os.path.join(self.get_data_dir(), '*.*')))"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngiving two dictionaries (possibly with other nested dictionaries as values), this function checks whether they have the same key structure. 
>>> from sem import DatabaseManager >>> d1 = {'a': 1, 'b': 2} >>> d2 = {'a': [], 'b': 3} >>> d3 = {'a': 4, 'c': 5} >>> DatabaseManager.have_same_structure(d1, d2) True >>> DatabaseManager.have_same_structure(d1, d3) False >>> d4 = {'a': {'c': 1}, 'b': 2} >>> d5 = {'a': {'c': 3}, 'b': 4} >>> d6 = {'a': {'c': 5, 'd': 6}, 'b': 7} >>> DatabaseManager.have_same_structure(d1, d4) False >>> DatabaseManager.have_same_structure(d4, d5) True >>> DatabaseManager.have_same_structure(d4, d6) False", "response": "def have_same_structure(d1, d2):\n \"\"\"\n Given two dictionaries (possibly with other nested dictionaries as\n values), this function checks whether they have the same key structure.\n\n >>> from sem import DatabaseManager\n >>> d1 = {'a': 1, 'b': 2}\n >>> d2 = {'a': [], 'b': 3}\n >>> d3 = {'a': 4, 'c': 5}\n >>> DatabaseManager.have_same_structure(d1, d2)\n True\n >>> DatabaseManager.have_same_structure(d1, d3)\n False\n\n >>> d4 = {'a': {'c': 1}, 'b': 2}\n >>> d5 = {'a': {'c': 3}, 'b': 4}\n >>> d6 = {'a': {'c': 5, 'd': 6}, 'b': 7}\n >>> DatabaseManager.have_same_structure(d1, d4)\n False\n >>> DatabaseManager.have_same_structure(d4, d5)\n True\n >>> DatabaseManager.have_same_structure(d4, d6)\n False\n \"\"\"\n # Keys of this level are the same\n if set(d1.keys()) != set(d2.keys()):\n return False\n\n # Check nested dictionaries\n for k1, k2 in zip(sorted(d1.keys()), sorted(d2.keys())):\n # If one of the values is a dictionary and the other is not\n if isinstance(d1[k1], dict) != isinstance(d2[k2], dict):\n return False\n # If both are dictionaries, recur\n elif isinstance(d1[k1], dict) and isinstance(d2[k2], dict):\n if not DatabaseManager.have_same_structure(d1[k1], d2[k2]):\n return False\n\n return True"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_all_values_of_all_params(self):\n\n values = collections.OrderedDict([[p, []] for p in\n sorted(self.get_params())])\n\n for result in 
self.get_results():\n for param in self.get_params():\n values[param] += [result['params'][param]]\n\n sorted_values = collections.OrderedDict([[k,\n sorted(list(set(values[k])))]\n for k in values.keys()])\n\n for k in sorted_values.keys():\n if sorted_values[k] == []:\n sorted_values[k] = None\n\n return sorted_values", "response": "Returns a dictionary containing all values that are taken by all available parameters."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nserialize a single object into a string.", "response": "def serialize(self, obj, method='json', beautify=False, raise_exception=False):\n \"\"\"Alias of helper.string.serialization.serialize\"\"\"\n\n return self.helper.string.serialization.serialize(\n obj=obj, method=method, beautify=beautify, raise_exception=raise_exception)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef deserialize(self, text, method='json', encoding='utf8', raise_exception=False):\n\n return self.helper.string.serialization.deserialize(\n text, method=method, encoding=encoding, raise_exception=raise_exception)", "response": "Deserialize a string into a new object."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a list of the parameters made available by the script.", "response": "def get_available_parameters(self):\n \"\"\"\n Return a list of the parameters made available by the script.\n \"\"\"\n\n # At the moment, we rely on regex to extract the list of available\n # parameters. 
A tighter integration with waf would allow for a more\n # natural extraction of the information.\n\n stdout = self.run_program(\"%s %s\" % (self.script_executable,\n '--PrintHelp'),\n environment=self.environment,\n native_spec=BUILD_GRID_PARAMS)\n\n options = re.findall('.*Program\\s(?:Arguments|Options):(.*)'\n 'General\\sArguments.*',\n stdout, re.DOTALL)\n\n if len(options):\n args = re.findall('.*--(.*?):.*', options[0], re.MULTILINE)\n return args\n else:\n return []"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nrun a program through the grid and returns the output.", "response": "def run_program(self, command, working_directory=os.getcwd(),\n environment=None, cleanup_files=True,\n native_spec=\"-l cputype=intel\"):\n \"\"\"\n Run a program through the grid, capturing the standard output.\n \"\"\"\n try:\n s = drmaa.Session()\n s.initialize()\n jt = s.createJobTemplate()\n jt.remoteCommand = os.path.dirname(\n os.path.abspath(__file__)) + '/run_program.sh'\n jt.args = [command]\n\n if environment is not None:\n jt.jobEnvironment = environment\n\n jt.workingDirectory = working_directory\n jt.nativeSpecification = native_spec\n output_filename = os.path.join(working_directory, 'output.txt')\n jt.outputPath = ':' + output_filename\n jt.joinFiles = True\n\n jobid = s.runJob(jt)\n\n s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n\n with open(output_filename, 'r') as output:\n stdout = output.read()\n\n # Clean up\n if cleanup_files:\n os.remove(output_filename)\n\n finally:\n try:\n s.control(drmaa.JOB_IDS_SESSION_ALL,\n drmaa.JobControlAction.TERMINATE)\n s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)\n s.exit()\n except(drmaa.errors.NoActiveSessionException):\n pass\n\n return stdout"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef fill_labeled_holes(labels, mask=None, size_fn = None):\n '''Fill all background pixels that are holes inside 
the foreground\n \n A pixel is a hole inside a foreground object if\n \n * there is no path from the pixel to the edge AND\n \n * there is no path from the pixel to any other non-hole\n pixel AND\n \n * there is no path from the pixel to two similarly-labeled pixels that\n are adjacent to two differently labeled non-hole pixels.\n \n labels - the current labeling\n \n mask - mask of pixels to ignore\n \n size_fn - if not None, it is a function that takes a size and a boolean\n indicating whether it is foreground (True) or background (False)\n The function should return True to analyze and False to ignore\n \n returns a filled copy of the labels matrix\n '''\n #\n # The algorithm:\n #\n # Label the background to get distinct background objects\n # Construct a graph of both foreground and background objects.\n # Walk the graph according to the rules.\n #\n labels_type = labels.dtype\n background = labels == 0\n if mask is not None:\n background &= mask\n \n blabels, count = scind.label(background, four_connect)\n labels = labels.copy().astype(int)\n lcount = np.max(labels)\n labels[blabels != 0] = blabels[blabels != 0] + lcount + 1\n lmax = lcount + count + 1\n is_not_hole = np.ascontiguousarray(np.zeros(lmax + 1, np.uint8))\n #\n # Find the indexes on the edge and use to populate the to-do list\n #\n to_do = np.unique(np.hstack((\n labels[0, :], labels[:, 0], labels[-1, :], labels[:, -1])))\n to_do = to_do[to_do != 0]\n is_not_hole[to_do] = True\n to_do = list(to_do)\n #\n # An array that names the first non-hole object\n #\n adjacent_non_hole = np.ascontiguousarray(np.zeros(lmax + 1), np.uint32)\n #\n # Find all 4-connected adjacent pixels\n # Note that there will be some i, j not in j, i\n #\n i = np.hstack([labels[:-1, :].flatten(), labels[:, :-1].flatten()])\n j = np.hstack([labels[1:, :].flatten(), labels[:, 1:].flatten()])\n i, j = i[i!=j], j[i!=j]\n if (len(i)) > 0:\n order = np.lexsort((j, i))\n i = i[order]\n j = j[order]\n # Remove duplicates and stack to 
guarantee that j, i is in i, j\n first = np.hstack([[True], (i[:-1] != i[1:]) | (j[:-1] != j[1:])])\n i, j = np.hstack((i[first], j[first])), np.hstack((j[first], i[first]))\n # Remove dupes again. (much shorter)\n order = np.lexsort((j, i))\n i = i[order]\n j = j[order]\n first = np.hstack([[True], (i[:-1] != i[1:]) | (j[:-1] != j[1:])])\n i, j = i[first], j[first]\n #\n # Now we make a ragged array of i and j\n #\n i_count = np.bincount(i)\n if len(i_count) < lmax + 1:\n i_count = np.hstack((i_count, np.zeros(lmax + 1 - len(i_count), int)))\n indexer = Indexes([i_count])\n #\n # Filter using the size function passed, if any\n #\n if size_fn is not None:\n areas = np.bincount(labels.flatten())\n for ii, area in enumerate(areas):\n if (ii > 0 and area > 0 and not is_not_hole[ii] and \n not size_fn(area, ii <= lcount)):\n is_not_hole[ii] = True\n to_do.append(ii)\n\n to_do_count = len(to_do)\n if len(to_do) < len(is_not_hole):\n to_do += [ 0 ] * (len(is_not_hole) - len(to_do))\n to_do = np.ascontiguousarray(np.array(to_do), np.uint32)\n fill_labeled_holes_loop(\n np.ascontiguousarray(i, np.uint32),\n np.ascontiguousarray(j, np.uint32),\n np.ascontiguousarray(indexer.fwd_idx, np.uint32),\n np.ascontiguousarray(i_count, np.uint32),\n is_not_hole, adjacent_non_hole, to_do, lcount, to_do_count)\n #\n # Make an array that assigns objects to themselves and background to 0\n #\n new_indexes = np.arange(len(is_not_hole)).astype(np.uint32)\n new_indexes[(lcount+1):] = 0\n #\n # Fill the holes by replacing the old value by the value of the\n # enclosing object.\n #\n is_not_hole = is_not_hole.astype(bool)\n new_indexes[~is_not_hole] = adjacent_non_hole[~ is_not_hole]\n if mask is not None:\n labels[mask] = new_indexes[labels[mask]]\n else:\n labels = new_indexes[labels]\n return labels.astype(labels_type)", "response": "Fill all background pixels that are holes inside the foreground object."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 
code\ndef adjacent(labels):\n '''Return a binary mask of all pixels which are adjacent to a pixel of \n a different label.\n \n '''\n high = labels.max()+1\n if high > np.iinfo(labels.dtype).max:\n labels = labels.astype(np.int)\n image_with_high_background = labels.copy()\n image_with_high_background[labels == 0] = high\n min_label = scind.minimum_filter(image_with_high_background,\n footprint=np.ones((3,3),bool),\n mode = 'constant',\n cval = high)\n max_label = scind.maximum_filter(labels,\n footprint=np.ones((3,3),bool),\n mode = 'constant',\n cval = 0)\n return (min_label != max_label) & (labels > 0)", "response": "Return a binary mask of all pixels which are adjacent to a pixel of \n a different label."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef binary_shrink_old(image, iterations=-1):\n global binary_shrink_top_right, binary_shrink_bottom_left\n if binary_shrink_top_right is None:\n #\n # None of these patterns can remove both of two isolated\n # eight-connected pixels. 
Taken together, they can remove any\n # pixel touching a background pixel.\n #\n # The top right pixels:\n # \n # 0xx\n # ..0\n # ...\n #\n binary_shrink_top_right = make_table(False,\n np.array([[0,0,0],\n [0,1,0],\n [0,1,0]],bool),\n np.array([[1,1,1],\n [0,1,0],\n [0,1,0]],bool))\n binary_shrink_top_right &= make_table(False,\n np.array([[0,0,0],\n [0,1,0],\n [1,0,0]], bool),\n np.array([[1,1,1],\n [0,1,1],\n [1,0,1]], bool))\n binary_shrink_top_right &= make_table(False,\n np.array([[0,0,0],\n [1,1,0],\n [0,0,0]], bool),\n np.array([[0,0,1],\n [1,1,1],\n [0,0,1]], bool))\n binary_shrink_top_right &= make_table(False,\n np.array([[0,0,0],\n [1,1,0],\n [0,1,1]], bool),\n np.array([[0,0,1],\n [1,1,1],\n [0,1,0]], bool))\n binary_shrink_top_right &= make_table(False,\n np.array([[0,0,0],\n [0,1,0],\n [0,0,1]], bool),\n np.array([[1,1,1],\n [1,1,0],\n [1,0,1]], bool))\n binary_shrink_top_right &= make_table(False,\n np.array([[0,0,0],\n [0,1,0],\n [1,1,1]], bool),\n np.array([[1,1,1],\n [1,1,0],\n [0,1,1]], bool))\n #\n # bottom left pixels\n #\n # ...\n # 0..\n # xx0\n binary_shrink_bottom_left = make_table(False,\n np.array([[0,1,0],\n [0,1,0],\n [0,0,0]],bool),\n np.array([[0,1,0],\n [0,1,0],\n [1,1,1]],bool))\n binary_shrink_bottom_left &= make_table(False,\n np.array([[0,0,1],\n [0,1,0],\n [0,0,0]], bool),\n np.array([[1,0,1],\n [1,1,0],\n [1,1,1]], bool))\n binary_shrink_bottom_left &= make_table(False,\n np.array([[0,0,0],\n [0,1,1],\n [0,0,0]], bool),\n np.array([[1,0,0],\n [1,1,1],\n [1,0,0]], bool))\n binary_shrink_bottom_left &= make_table(False,\n np.array([[1,1,0],\n [0,1,1],\n [0,0,0]], bool),\n np.array([[0,1,0],\n [1,1,1],\n [1,0,0]], bool))\n binary_shrink_bottom_left &= make_table(False,\n np.array([[1,0,0],\n [0,1,0],\n [0,0,0]], bool),\n np.array([[1,0,1],\n [0,1,1],\n [1,1,1]], bool))\n binary_shrink_bottom_left &= make_table(False,\n np.array([[1,1,1],\n [0,1,0],\n [0,0,0]], bool),\n np.array([[1,1,0],\n [0,1,1],\n [1,1,1]], bool))\n orig_image 
= image\n index_i, index_j, image = prepare_for_index_lookup(image, False)\n if iterations == -1:\n iterations = len(index_i)\n for i in range(iterations):\n pixel_count = len(index_i)\n for table in (binary_shrink_top_right, \n binary_shrink_bottom_left):\n index_i, index_j = index_lookup(index_i, index_j, \n image, table, 1)\n if len(index_i) == pixel_count:\n break\n image = extract_from_image_lookup(orig_image, index_i, index_j)\n return image", "response": "Shrinks an image by repeatedly removing pixels which have partners\n above to the right and below until idempotent."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nshrinking an image by repeatedly removing pixels which have partners above and below until idempotent", "response": "def binary_shrink(image, iterations=-1):\n \"\"\"Shrink an image by repeatedly removing pixels which have partners\n above, to the left, to the right and below until the image doesn't change\n \n image - binary image to be manipulated\n iterations - # of times to shrink, -1 to shrink until idempotent\n \n There are horizontal/vertical thinners which detect a pixel on\n an edge with an interior pixel either horizontally or vertically\n attached like this:\n 0 0 0\n X 1 X\n X 1 X\n and there are much more specific diagonal thinners which detect\n a pixel on the edge of a diagonal, like this:\n 0 0 0\n 0 1 0\n 0 0 1\n Rotate each of these 4x to get the four directions for each\n \"\"\"\n global erode_table, binary_shrink_ulr_table, binary_shrink_lrl_table\n global binary_shrink_urb_table, binary_shrink_llt_table\n if erode_table is None:\n #\n # The erode table hits all patterns that can be eroded without\n # changing the euler_number\n erode_table = np.array([pattern_of(index)[1,1] and\n (scind.label(pattern_of(index-16))[1] != 1)\n for index in range(512)])\n erode_table[index_of(np.ones((3,3), bool))] = True\n #\n # Each other table is more specific: a specific corner or a specific\n # edge must be on 
where the corner and edge are not adjacent\n #\n binary_shrink_ulr_table = (\n erode_table | \n (make_table(False, np.array([[0,0,0],\n [1,1,0],\n [0,0,0]], bool),\n np.array([[0,0,0],\n [1,1,1],\n [0,0,0]],bool)) &\n make_table(False, np.array([[1,0,0],\n [0,1,0],\n [0,0,0]],bool),\n np.array([[1,0,0],\n [0,1,1],\n [0,1,1]],bool))))\n binary_shrink_urb_table = (\n erode_table | \n (make_table(False, np.array([[0,1,0],\n [0,1,0],\n [0,0,0]], bool),\n np.array([[0,1,0],\n [0,1,0],\n [0,1,0]],bool)) &\n make_table(False, np.array([[0,0,1],\n [0,1,0],\n [0,0,0]],bool),\n np.array([[0,0,1],\n [1,1,0],\n [1,1,0]],bool))))\n binary_shrink_lrl_table = (\n erode_table |\n (make_table(False, np.array([[0,0,0],\n [0,1,1],\n [0,0,0]], bool),\n np.array([[0,0,0],\n [1,1,1],\n [0,0,0]],bool)) &\n make_table(False, np.array([[0,0,0],\n [0,1,0],\n [0,0,1]], bool),\n np.array([[1,1,0],\n [1,1,0],\n [0,0,1]], bool))))\n binary_shrink_llt_table = (\n erode_table | \n (make_table(False, np.array([[0,0,0],\n [0,1,0],\n [0,1,0]], bool),\n np.array([[0,1,0],\n [0,1,0],\n [0,1,0]],bool)) &\n make_table(False, np.array([[0,0,0],\n [0,1,0],\n [1,0,0]], bool),\n np.array([[0,1,1],\n [0,1,1],\n [1,0,0]], bool))))\n \n orig_image = image\n index_i, index_j, image = prepare_for_index_lookup(image, False)\n if iterations == -1:\n iterations = len(index_i)\n for i in range(iterations):\n pixel_count = len(index_i)\n for table in (binary_shrink_ulr_table, \n binary_shrink_urb_table,\n binary_shrink_lrl_table,\n binary_shrink_llt_table):\n index_i, index_j = index_lookup(index_i, index_j, \n image, table, 1)\n if len(index_i) == pixel_count:\n break\n image = extract_from_image_lookup(orig_image, index_i, index_j)\n return image"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef strel_disk(radius):\n iradius = int(radius)\n x,y = np.mgrid[-iradius:iradius+1,-iradius:iradius+1]\n radius2 = radius * radius\n strel = np.zeros(x.shape)\n 
strel[x*x+y*y <= radius2] = 1\n return strel", "response": "Create a disk structuring element for morphological operations"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a diamond structuring element for morphological operations", "response": "def strel_diamond(radius):\n \"\"\"Create a diamond structuring element for morphological operations\n \n radius - the offset of the corners of the diamond from the origin\n rounded down (e.g. r=2: (0, 2), (2, 0), (0, -2), (-2, 0))\n \n returns a two-dimensional binary array\n \"\"\"\n iradius = int(radius)\n i, j = np.mgrid[-iradius:(iradius + 1), -iradius:(iradius+1)]\n \n strel = (((i+j) <= radius) & ((i+j) >= -radius) &\n ((i-j) <= radius) & ((i-j) >= -radius))\n return strel"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef strel_line(length, angle):\n angle = float(angle) * np.pi / 180.\n x_off = int(np.finfo(float).eps + np.cos(angle) * length / 2)\n # Y is flipped here because \"up\" is negative\n y_off = -int(np.finfo(float).eps + np.sin(angle) * length / 2)\n x_center = abs(x_off)\n y_center = abs(y_off)\n strel = np.zeros((y_center * 2 + 1, \n x_center * 2 + 1), bool)\n draw_line(strel, \n (y_center - y_off, x_center - x_off), \n (y_center, x_center), True)\n draw_line(strel,\n (y_center + y_off, x_center + x_off), \n (y_center, x_center), True)\n\n return strel", "response": "Create a line structuring element for morphological operations"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef strel_octagon(radius):\n #\n # Inscribe a diamond in a square to get an octagon.\n #\n iradius = int(radius)\n i, j = np.mgrid[-iradius:(iradius + 1), -iradius:(iradius+1)]\n #\n # The distance to the diagonal side is also iradius:\n #\n # iradius ** 2 = i**2 + j**2 and i = j\n # iradius ** 2 = 2 * i ** 2\n # i = iradius / sqrt(2)\n # i + j = iradius * sqrt(2)\n 
#\n dradius = float(iradius) * np.sqrt(2)\n strel = (((i+j) <= dradius) & ((i+j) >= -dradius) &\n ((i-j) <= dradius) & ((i-j) >= -dradius))\n return strel", "response": "Create an octagonal structuring element for morphological operations"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef strel_rectangle(width, height):\n return np.ones([int((hw - 1) // 2) * 2 + 1 for hw in (height, width)], bool)", "response": "Create a rectangular structuring element\n \n "} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nfinding the local maximum at each point in the image using the given structure", "response": "def cpmaximum(image, structure=np.ones((3,3),dtype=bool),offset=None):\n \"\"\"Find the local maximum at each point in the image, using the given structuring element\n \n image - a 2-d array of doubles\n structure - a boolean structuring element indicating which\n local elements should be sampled\n offset - the offset to the center of the structuring element\n \"\"\"\n center = np.array(structure.shape) // 2\n if offset is None:\n offset = center\n origin = np.array(offset) - center\n return scind.maximum_filter(image, footprint=structure, origin=origin,\n mode='constant', cval=np.min(image))"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef relabel(image):\n #\n # Build a label table that converts an old label # into\n # labels using the new numbering scheme\n #\n unique_labels = np.unique(image[image!=0])\n if len(unique_labels) == 0:\n return (image,0)\n consecutive_labels = np.arange(len(unique_labels))+1\n label_table = np.zeros(unique_labels.max()+1, int)\n label_table[unique_labels] = consecutive_labels\n #\n # Use the label table to remap all of the labels\n #\n new_image = label_table[image]\n return (new_image,len(unique_labels))", "response": "Relabels each object in the object list into a new object list."} 
{"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngiving a binary image return an image of the convex hull", "response": "def convex_hull_image(image):\n '''Given a binary image, return an image of the convex hull'''\n labels = image.astype(int)\n points, counts = convex_hull(labels, np.array([1]))\n output = np.zeros(image.shape, int)\n for i in range(counts[0]):\n inext = (i+1) % counts[0]\n draw_line(output, points[i,1:], points[inext,1:],1)\n output = fill_labeled_holes(output)\n return output == 1"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ngive a labeled image return a list of points per object ordered by the interior point.", "response": "def convex_hull(labels, indexes=None, fast=True):\n \"\"\"Given a labeled image, return a list of points per object ordered by\n angle from an interior point, representing the convex hull.s\n \n labels - the label matrix\n indexes - an array of label #s to be processed, defaults to all non-zero\n labels\n \n Returns a matrix and a vector. The matrix consists of one row per\n point in the convex hull. Each row has three columns, the label #,\n the i coordinate of the point and the j coordinate of the point. 
The\n result is organized first by label, then the points are arranged\n counter-clockwise around the perimeter.\n The vector is a vector of #s of points in the convex hull per label\n \"\"\"\n if indexes is None:\n indexes = np.unique(labels)\n indexes.sort()\n indexes=indexes[indexes!=0]\n else:\n indexes=np.array(indexes)\n if len(indexes) == 0:\n return np.zeros((0,2),int),np.zeros((0,),int)\n #\n # Reduce the # of points to consider\n #\n outlines = outline(labels)\n coords = np.argwhere(outlines > 0).astype(np.int32)\n if len(coords)==0:\n # Every outline of every image is blank\n return (np.zeros((0,3),int),\n np.zeros((len(indexes),),int))\n \n i = coords[:,0]\n j = coords[:,1]\n labels_per_point = labels[i,j]\n pixel_labels = np.column_stack((i,j,labels_per_point))\n return convex_hull_ijv(pixel_labels, indexes, fast)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the convex hull for each label using an ijv labeling pixel_labels and indexes", "response": "def convex_hull_ijv(pixel_labels, indexes, fast=True):\n '''Return the convex hull for each label using an ijv labeling\n \n pixel_labels: the labeling of the pixels in i,j,v form where\n i & j are the coordinates of a pixel and v is\n the pixel's label number\n indexes: the indexes at which to measure the convex hull\n\n Returns a matrix and a vector. The matrix consists of one row per\n point in the convex hull. Each row has three columns, the label #,\n the i coordinate of the point and the j coordinate of the point. 
The\n result is organized first by label, then the points are arranged\n counter-clockwise around the perimeter.\n The vector is a vector of #s of points in the convex hull per label\n '''\n if fast:\n return _convex_hull.convex_hull_ijv(pixel_labels, indexes)\n\n # We keep this code for testing.\n if len(indexes) == 0:\n return np.zeros((0,3),int),np.zeros((0,),int)\n #\n # An array that converts from label # to index in \"indexes\"\n anti_indexes = np.zeros((np.max(indexes)+1,),int)\n anti_indexes[indexes] = list(range(len(indexes)))\n\n coords = pixel_labels[:,:2]\n i = coords[:, 0]\n j = coords[:, 1]\n # This disgusting copy spooge appears to be needed for scipy 0.7.0\n labels_per_point = np.zeros(len(pixel_labels), int)\n labels_per_point[:] = pixel_labels[:,2]\n #\n # Calculate the centers for each label\n #\n center_i = fixup_scipy_ndimage_result(\n scind.mean(i.astype(float), labels_per_point, indexes))\n center_j = fixup_scipy_ndimage_result(\n scind.mean(j.astype(float), labels_per_point, indexes))\n centers = np.column_stack((center_i, center_j))\n #\n # Now make an array with one outline point per row and the following\n # columns:\n #\n # index of label # in indexes array\n # angle of the point relative to the center\n # i coordinate of the point\n # j coordinate of the point\n #\n anti_indexes_per_point = anti_indexes[labels_per_point]\n centers_per_point = centers[anti_indexes_per_point]\n angle = np.arctan2(i-centers_per_point[:,0],j-centers_per_point[:,1])\n a = np.zeros((len(i),3), np.int32)\n a[:,0] = anti_indexes_per_point\n a[:,1:] = coords\n #\n # Sort the array first by label # (sort of), then by angle\n #\n order = np.lexsort((angle,anti_indexes_per_point))\n #\n # Make unique\n #\n same_as_next = np.hstack([np.all(a[order[:-1],:] == a[order[1:],:], 1), [False]])\n order = order[~same_as_next]\n a=a[order]\n anti_indexes_per_point = anti_indexes_per_point[order]\n angle = angle[order]\n centers_per_point = centers_per_point[order]\n #\n # 
Make the result matrix, leaving enough space so that all points might\n # be on the convex hull.\n #\n result = np.zeros((len(order),3), np.int32)\n result[:,0] = labels_per_point[order]\n #\n # Create an initial count vector\n #\n v = np.ones((a.shape[0],),dtype = np.int32)\n result_counts = scipy.sparse.coo_matrix((v,(a[:,0],v*0)),\n shape=(len(indexes),1))\n result_counts = result_counts.toarray().flatten()\n r_anti_indexes_per_point = anti_indexes_per_point # save this\n #\n # Create a vector that indexes into the results for each label\n #\n result_index = np.zeros(result_counts.shape, np.int32)\n result_index[1:]=np.cumsum(result_counts[:-1])\n #\n # Initialize the counts of convex hull points to a ridiculous number\n #\n counts = np.iinfo(np.int32).max\n first_pass = True\n while True:\n #\n # Figure out how many putative convex hull points there are for\n # each label.\n #\n # If the count for a label is 3 or less, it's a convex hull or\n # degenerate case.\n #\n # If the count hasn't changed in an iteration, then we've done\n # as well as we can hope to do.\n #\n v = np.ones((a.shape[0],),dtype = np.int32)\n new_counts = scipy.sparse.coo_matrix((v,(a[:,0],v*0)),\n shape=(len(indexes),1))\n new_counts = new_counts.toarray().flatten()\n done_count = (2 if first_pass else 3)\n finish_me = ((new_counts > 0) & \n ((new_counts <= done_count) | \n (new_counts == counts)))\n indexes_to_finish = np.argwhere(finish_me).astype(np.int32).squeeze()\n keep_me = (new_counts > done_count) & (new_counts < counts)\n indexes_to_keep = np.argwhere(keep_me).astype(np.int32).squeeze()\n if len(indexes_to_finish):\n result_counts[finish_me] = new_counts[finish_me]\n #\n # Store the coordinates of each of the points to finish\n #\n finish_this_row = finish_me[a[:,0]]\n rows_to_finish = np.argwhere(finish_this_row).flatten()\n a_to_finish = a[rows_to_finish]\n atf_indexes = a_to_finish[:,0]\n #\n # Map label #s to the index into indexes_to_finish of that label #\n #\n 
anti_indexes_to_finish = np.zeros((len(indexes),), np.int32)\n anti_indexes_to_finish[indexes_to_finish] = list(range(len(indexes_to_finish)))\n #\n # Figure out the indices of each point in a label to be finished.\n # We figure out how much to subtract for each label, then\n # subtract that much from 0:N to get successive indexes at\n # each label.\n # Then we add the result_index to figure out where to store it\n # in the result table.\n #\n finish_idx_base = np.zeros((len(indexes_to_finish),), np.int32)\n finish_idx_base[1:]=np.cumsum(new_counts[indexes_to_finish])[:-1]\n finish_idx_bases = finish_idx_base[anti_indexes_to_finish[atf_indexes]]\n finish_idx = (np.array(list(range(a_to_finish.shape[0])))-\n finish_idx_bases)\n finish_idx = finish_idx + result_index[atf_indexes]\n result[finish_idx,1:] = a_to_finish[:,1:]\n if len(indexes_to_keep) == 0:\n break\n #\n # Figure out which points are still available\n #\n rows_to_keep = np.argwhere(keep_me[a[:,0].astype(np.int32)]).flatten()\n rows_to_keep = rows_to_keep.astype(np.int32)\n a = a[rows_to_keep]\n centers_per_point = centers_per_point[rows_to_keep]\n counts = new_counts\n #\n # The rule is that the area of the triangle from the center to\n # point N-1 to point N plus the area of the triangle from the center\n # to point N to point N+1 must be greater than the area of the\n # triangle from the center to point N-1 to point N+1 for a point\n # to be on the convex hull.\n # N-1 and N+1 have to be modulo \"counts\", so we make special arrays\n # to address those situations.\n #\n anti_indexes_to_keep = np.zeros((len(indexes),), np.int32)\n anti_indexes_to_keep[indexes_to_keep] = list(range(len(indexes_to_keep)))\n idx_base = np.zeros((len(indexes_to_keep),), np.int32)\n idx_base[1:]=np.cumsum(counts[keep_me])[0:-1]\n idx_bases = idx_base[anti_indexes_to_keep[a[:,0]]]\n counts_per_pt = counts[a[:,0]]\n idx = np.array(list(range(a.shape[0])), np.int32)-idx_bases\n n_minus_one = 
np.mod(idx+counts_per_pt-1,counts_per_pt)+idx_bases\n n_plus_one = np.mod(idx+1,counts_per_pt)+idx_bases\n #\n # Compute the triangle areas\n #\n t_left_plus_right = (triangle_areas(centers_per_point,\n a[n_minus_one, 1:],\n a[:, 1:]) +\n triangle_areas(centers_per_point,\n a[:, 1:],\n a[n_plus_one, 1:]))\n t_lr = triangle_areas(centers_per_point,\n a[n_minus_one, 1:], a[n_plus_one, 1:])\n #\n # Keep the points where the area of the left triangle plus the\n # area of the right triangle is bigger than the area of the triangle\n # composed of the points to the left and right. This means that\n # there's a little triangle sitting on top of t_lr with our point\n # on top and convex in relation to its neighbors.\n #\n keep_me = t_left_plus_right > t_lr\n #\n # If all points on a line are co-linear with the center, then the\n # whole line goes away. Special handling for this to find the points\n # most distant from the center and on the same side\n #\n consider_me = t_left_plus_right == 0\n del t_lr, t_left_plus_right\n if np.any(consider_me):\n diff_i = a[:,1]-centers_per_point[:,0]\n diff_j = a[:,2]-centers_per_point[:,1]\n #\n # The manhattan distance is good enough\n #\n dist = np.abs(diff_i)+np.abs(diff_j)\n # The sign is different on different sides of a line including\n # the center. 
Multiply j by 2 to keep from colliding with i\n #\n # If both signs are zero, then the point is in the center\n #\n # Compute np.sign(diff_i, out=diff_i) + np.sign(diff_j) * 2,\n # but without any temporaries.\n sign = np.sign(diff_j, diff_j)\n sign *= 2\n sign += np.sign(diff_i)\n del diff_i, diff_j\n\n n_minus_one_consider = n_minus_one[consider_me]\n n_plus_one_consider = n_plus_one[consider_me]\n left_is_worse = ((dist[consider_me] > dist[n_minus_one_consider]) |\n (sign[consider_me] != sign[n_minus_one_consider]))\n right_is_worse = ((dist[consider_me] > dist[n_plus_one_consider]) |\n (sign[consider_me] != sign[n_plus_one_consider]))\n to_keep = left_is_worse & right_is_worse & (sign[consider_me] != 0)\n keep_me[consider_me] = to_keep\n del dist, sign\n a = a[keep_me,:]\n centers_per_point = centers_per_point[keep_me]\n del keep_me\n first_pass = False\n #\n # Finally, we have to shrink the results. We number each of the\n # points for a label, then only keep those whose indexes are\n # less than the count for their label.\n #\n within_label_index = np.array(list(range(result.shape[0])), np.int32)\n counts_per_point = result_counts[r_anti_indexes_per_point]\n result_indexes_per_point = result_index[r_anti_indexes_per_point] \n within_label_index = (within_label_index - result_indexes_per_point)\n result = result[within_label_index < counts_per_point]\n return result, result_counts"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nfill the convex hulls of objects in the object tree.", "response": "def fill_convex_hulls(ch_pts, ch_counts):\n '''Return the points within the convex hulls of objects\n \n ch_pts - a Nx3 array of columns of label #, i and j as output by convex_hull\n ch_counts - the number of points per object\n \n returns the points in ijv format.\n '''\n if len(ch_pts) == 0:\n return np.zeros((0,3), int)\n #\n # Connect the last line to the first\n #\n ch_counts = ch_counts[ch_counts != 0]\n indexer_in = 
Indexes(ch_counts)\n #\n # Get line ends. This involves connecting the last\n # point with the first.\n #\n idx_1 = np.arange(ch_pts.shape[0]) + 1\n idx_1[indexer_in.fwd_idx+indexer_in.counts - 1] = indexer_in.fwd_idx\n ch_pts1 = ch_pts[idx_1, 1:]\n #\n # Get J at each I (floating point) for each line\n # Generate two points (min J, max J) for horizontal lines\n #\n n_i = np.abs(ch_pts[:, 1] - ch_pts1[:, 0]) + 1\n horizontal = n_i == 1\n if (np.any(horizontal)):\n i_horiz = ch_pts[horizontal, 1]\n l_horiz = ch_pts[horizontal, 0]\n j_first_horiz = ch_pts[horizontal, 2]\n j_last_horiz = ch_pts1[horizontal, 1]\n else:\n i_horiz = l_horiz = j_first_horiz = j_last_horiz = np.zeros(0, int)\n \n if (np.any(~ horizontal)):\n ch_pts, ch_pts1, n_i = [x[~horizontal] for x in (ch_pts, ch_pts1, n_i)]\n indexer = Indexes(n_i)\n l = ch_pts[indexer.rev_idx, 0]\n sign = np.sign(ch_pts1[:, 0] - ch_pts[:, 1])\n i = ch_pts[indexer.rev_idx, 1] + sign[indexer.rev_idx] * indexer.idx[0]\n j = ch_pts[indexer.rev_idx, 2] + \\\n indexer.idx[0] * (ch_pts1[indexer.rev_idx, 1] - \n ch_pts[indexer.rev_idx, 2]).astype(float) / \\\n (n_i[indexer.rev_idx] - 1)\n else:\n i = l = np.zeros(0, int)\n j = np.zeros(0)\n line_labels = np.concatenate((l_horiz, l_horiz, l))\n i = np.concatenate((i_horiz, i_horiz, i))\n j = np.concatenate((j_first_horiz.astype(float),\n j_last_horiz.astype(float), j))\n #\n # Sort by label, then i, then j\n #\n order = np.lexsort((j, i, line_labels))\n i, j, line_labels = i[order], j[order], line_labels[order]\n #\n # Find the min and max j for every i and label. 
Then we\n # can make runs of them.\n #\n firsts = np.hstack(\n [[0], \n np.argwhere((line_labels[:-1] != line_labels[1:]) | \n (i[:-1] != i[1:])).flatten() + 1, \n [len(line_labels)]])\n counts = firsts[1:] - firsts[:-1]\n run_indexer = Indexes(counts)\n #\n # Find the first and last J for each i and create rasters of j\n #\n i = i[run_indexer.fwd_idx]\n j0 = np.ceil(j[run_indexer.fwd_idx]).astype(int)\n j1 = np.floor(j[run_indexer.fwd_idx + run_indexer.counts[0]-1]).astype(int)\n l = line_labels[run_indexer.fwd_idx]\n out_indexer = Indexes(j1-j0+1)\n ijv = np.column_stack((\n i[out_indexer.rev_idx], j0[out_indexer.rev_idx]+out_indexer.idx[0],\n l[out_indexer.rev_idx]))\n return ijv"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ndraws a line between two points pt0 and pt1.", "response": "def draw_line(labels,pt0,pt1,value=1):\n \"\"\"Draw a line between two points\n \n pt0, pt1 are in i,j format which is the reverse of x,y format\n Uses the Bresenham algorithm\n Some code transcribed from http://www.cs.unc.edu/~mcmillan/comp136/Lecture6/Lines.html\n \"\"\"\n y0,x0 = pt0\n y1,x1 = pt1\n diff_y = abs(y1-y0)\n diff_x = abs(x1-x0)\n x = x0\n y = y0\n labels[y,x]=value\n step_x = (x1 > x0 and 1) or -1\n step_y = (y1 > y0 and 1) or -1\n if diff_y > diff_x:\n # Y varies fastest, do x before y\n remainder = diff_x*2 - diff_y\n while y != y1:\n if remainder >= 0:\n x += step_x\n remainder -= diff_y*2\n y += step_y\n remainder += diff_x*2\n labels[y,x] = value\n else:\n remainder = diff_y*2 - diff_x\n while x != x1:\n if remainder >= 0:\n y += step_y\n remainder -= diff_x*2\n x += step_x\n remainder += diff_y*2\n labels[y,x] = value"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nretrieve the coordinates of the points along lines in a single line.", "response": "def get_line_pts(pt0i, pt0j, pt1i, pt1j):\n '''Retrieve the coordinates of the points along lines\n \n pt0i, pt0j - the starting 
coordinates of the lines (1-d nparray)\n pt1i, pt1j - the ending coordinates of the lines (1-d nparray)\n \n use the Bresenham algorithm to find the coordinates along the lines\n connectiong pt0 and pt1. pt01, pt0j, pt1i and pt1j must be 1-d arrays\n of similar size and must be of integer type.\n \n The results are returned as four vectors - index, count, i, j.\n index is the index of the first point in the line for each coordinate pair\n count is the # of points in the line\n i is the I coordinates for each point\n j is the J coordinate for each point\n '''\n assert len(pt0i) == len(pt0j)\n assert len(pt0i) == len(pt1i)\n assert len(pt0i) == len(pt1j)\n pt0i = np.array(pt0i, int)\n pt0j = np.array(pt0j, int)\n pt1i = np.array(pt1i, int)\n pt1j = np.array(pt1j, int)\n if len(pt0i) == 0:\n # Return four zero-length arrays if nothing passed in\n return [np.zeros((0,),int)] * 4\n #\n # The Bresenham algorithm picks the coordinate that varies the most\n # and generates one point for each step in that coordinate. Add one\n # for the start point.\n #\n diff_i = np.abs(pt0i - pt1i)\n diff_j = np.abs(pt0j - pt1j)\n count = np.maximum(diff_i, diff_j).astype(int) + 1\n #\n # The indexes of the ends of the coordinate vectors are at the\n # cumulative sum of the counts. We get to the starts by subtracting\n # the count.\n #\n index = np.cumsum(count) - count\n #\n # Find the step directions per coordinate pair. 
\n # True = 1, False = 0\n # True * 2 - 1 = 1, False * 2 - 1 = -1\n #\n step_i = (pt1i > pt0i).astype(int) * 2 - 1\n step_j = (pt1j > pt0j).astype(int) * 2 - 1\n #\n # Make arrays to hold the results\n #\n n_pts = index[-1] + count[-1]\n i = np.zeros(n_pts, int)\n j = np.zeros(n_pts, int)\n #\n # Put pt0 into the arrays\n #\n i[index] = pt0i\n j[index] = pt0j\n #\n # # # # # # # # # #\n #\n # Do the points for which I varies most (or it's a tie).\n #\n mask = (diff_i >= diff_j)\n count_t = count[mask]\n if len(count_t) > 0:\n last_n = np.max(count_t)\n diff_i_t = diff_i[mask]\n diff_j_t = diff_j[mask]\n remainder = diff_j_t * 2 - diff_i_t\n current_i = pt0i[mask]\n current_j = pt0j[mask]\n index_t = index[mask]\n step_i_t = step_i[mask]\n step_j_t = step_j[mask]\n for n in range(1,last_n+1):\n #\n # Eliminate all points that are done\n #\n mask = (count_t > n)\n remainder = remainder[mask]\n current_i = current_i[mask]\n current_j = current_j[mask]\n index_t = index_t[mask]\n count_t = count_t[mask]\n diff_i_t = diff_i_t[mask]\n diff_j_t = diff_j_t[mask]\n step_i_t = step_i_t[mask]\n step_j_t = step_j_t[mask]\n #\n # Take a step in the J direction if the remainder is positive\n #\n remainder_mask = (remainder >= 0)\n current_j[remainder_mask] += step_j_t[remainder_mask]\n remainder[remainder_mask] -= diff_i_t[remainder_mask] * 2\n #\n # Always take a step in the I direction\n #\n current_i += step_i_t\n remainder += diff_j_t * 2\n i[index_t+n] = current_i\n j[index_t+n] = current_j\n #\n # # # # # # # # # #\n #\n # Do the points for which J varies most\n #\n mask = (diff_j > diff_i)\n count_t = count[mask]\n if len(count_t) > 0:\n last_n = np.max(count_t)\n diff_i_t = diff_i[mask]\n diff_j_t = diff_j[mask]\n remainder = diff_i_t * 2 - diff_j_t\n current_i = pt0i[mask]\n current_j = pt0j[mask]\n index_t = index[mask]\n step_i_t = step_i[mask]\n step_j_t = step_j[mask]\n for n in range(1,last_n+1):\n #\n # Eliminate all points that are done\n #\n mask = (count_t > 
n)\n remainder = remainder[mask]\n current_i = current_i[mask]\n current_j = current_j[mask]\n index_t = index_t[mask]\n count_t = count_t[mask]\n diff_i_t = diff_i_t[mask]\n diff_j_t = diff_j_t[mask]\n step_i_t = step_i_t[mask]\n step_j_t = step_j_t[mask]\n #\n # Take a step in the I direction if the remainder is positive\n #\n remainder_mask = (remainder >= 0)\n current_i[remainder_mask] += step_i_t[remainder_mask]\n remainder[remainder_mask] -= diff_j_t[remainder_mask] * 2\n #\n # Always take a step in the J direction\n #\n current_j += step_j_t\n remainder += diff_i_t * 2\n i[index_t+n] = current_i\n j[index_t+n] = current_j\n return index, count, i, j"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nconvert a series of polygon lines to a binary array of the given shape.", "response": "def polygon_lines_to_mask(pt0i, pt0j, pt1i, pt1j, shape):\n '''Convert a series of polygon lines to a background / foreground mask\n \n pt0i, pt0j, pt1i, pt1j - start / end points of lines. Points are rounded\n to the nearest integer coordinate if float.\n \n shape - shape of the mask array\n \n This algorithm assumes that the lines form closed polygons. The lines\n can be in any desired order.\n returns a binary array of the given shape with the interiors of\n polygons filled in.\n \n The algorithm assumes that polygons do not overlap. Overlapping polygons\n should be evaluated separately and their results should be or'ed\n '''\n #\n # Use get_line_pts to find the points along the lines. Mark all of\n # these points as within the mask.\n #\n # Add all points along all lines, except for those whose i coordinate\n # is at the minimum for that line. 
This avoids doubly counting in cases\n # such as this:\n #\n # O\n # | \\\n # O 0\n # | /\n # O\n #\n # and this\n #\n # O\n # |\\\n # O---0 \\\n #\n # Also remove all points in a line at a particular i but the last to\n # correct for this:\n #\n # 0\n # | + +\n # | + + <- count = 3\n # 0 + + + + 0\n #\n # Use a sparse matrix to sum the hits at every pixel. Some pixels will have\n # double hits:\n #\n # 1 1\n # 1 1\n # 2\n #\n # Also, if two polygons share a common side, that side will be doubly\n # numbered and will disappear.\n #\n #Pixels with odd numbers are foreground and with\n # even numbers are background.\n #\n # 1 1 1 1 2 <- 2 already included because it's on a line.\n # 1 1 2\n # 2\n #\n pt0i, pt0j, pt1i, pt1j = [np.round(np.atleast_1d(x)).astype(int)\n for x in (pt0i, pt0j, pt1i, pt1j)]\n result = np.zeros(shape, bool)\n discard_me = pt0i == pt1i\n index, count, i, j = get_line_pts(pt0i[discard_me], pt0j[discard_me],\n pt1i[discard_me], pt1j[discard_me])\n result[i, j] = True\n pt0i, pt0j, pt1i, pt1j = [x[~discard_me] for x in (pt0i, pt0j, pt1i, pt1j)]\n reverse_me = pt0i > pt1i\n pt0i[reverse_me], pt0j[reverse_me], pt1i[reverse_me], pt1j[reverse_me] = \\\n pt1i[reverse_me], pt1j[reverse_me], pt0i[reverse_me], pt0j[reverse_me]\n index, count, i, j = get_line_pts(pt0i, pt0j, pt1i, pt1j)\n #\n # Mark all i,j as foreground\n #\n result[i, j] = True\n #\n # Remove any pixel whose I is pt0i\n #\n indexer = Indexes(count)\n dont_use = pt0i[indexer.rev_idx] == i\n #\n # Remove any pixel which is from the same line as the next pixel\n # and has the same \"i\"\n #\n dont_use[:-1] |= ((indexer.rev_idx[:-1] == indexer.rev_idx[1:]) &\n (i[:-1] == i[1:]))\n i, j = i[~ dont_use], j[~ dont_use]\n #\n # Create a sparse matrix to do the counting, then convert.\n #\n hitcounts = scipy.sparse.coo_matrix((np.ones(len(i), int), (i, j)),\n shape = shape,\n dtype = np.uint16).toarray()\n #\n # Scan along the I direction. 
Odd pixels are inside.\n #\n result[(np.cumsum(hitcounts, 1) % 2) == 1] = True\n return result"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef fixup_scipy_ndimage_result(whatever_it_returned):\n if getattr(whatever_it_returned,\"__getitem__\",False):\n return np.array(whatever_it_returned)\n else:\n return np.array([whatever_it_returned])", "response": "Convert a result from scipy. ndimage to a numpy array\n \n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef centers_of_labels(labels):\n '''Return the i,j coordinates of the centers of a labels matrix\n \n The result returned is an 2 x n numpy array where n is the number\n of the label minus one, result[0,x] is the i coordinate of the center\n and result[x,1] is the j coordinate of the center.\n You can unpack the result as \"i,j = centers_of_labels(labels)\"\n '''\n max_labels = np.max(labels)\n if max_labels == 0:\n return np.zeros((2,0),int)\n \n result = scind.center_of_mass(np.ones(labels.shape),\n labels,\n np.arange(max_labels)+1)\n result = np.array(result)\n if result.ndim == 1:\n result.shape = (2,1)\n return result\n return result.transpose()", "response": "Return the i j coordinates of the centers of a set of labels."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns the i j coordinates of the maximum value within each object within each label.", "response": "def maximum_position_of_labels(image, labels, indices):\n '''Return the i,j coordinates of the maximum value within each object\n \n image - measure the maximum within this image\n labels - use the objects within this labels matrix\n indices - label #s to measure\n \n The result returned is an 2 x n numpy array where n is the number\n of the label minus one, result[0,x] is the i coordinate of the center\n and result[x,1] is the j coordinate of the center.\n '''\n \n if len(indices) == 
0:\n return np.zeros((2,0),int)\n \n result = scind.maximum_position(image, labels, indices)\n result = np.array(result,int)\n if result.ndim == 1:\n result.shape = (2,1)\n return result\n return result.transpose()"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef minimum_enclosing_circle(labels, indexes = None, \n hull_and_point_count = None):\n \"\"\"Find the location of the minimum enclosing circle and its radius\n \n labels - a labels matrix\n indexes - an array giving the label indexes to be processed\n hull_and_point_count - convex_hull output if already done. None = calculate\n \n returns an Nx3 array organized as i,j of the center and radius\n Algorithm from \n http://www.personal.kent.edu/~rmuhamma/Compgeometry/MyCG/CG-Applets/Center/centercli.htm\n who calls it the Applet's Algorithm and ascribes it to Pr. Chrystal\n The original citation is Professor Chrystal, \"On the problem to construct\n the minimum circle enclosing n given points in a plane\", Proceedings of\n the Edinburgh Mathematical Society, vol 3, 1884\n \"\"\"\n if indexes is None:\n if hull_and_point_count is not None:\n indexes = np.array(np.unique(hull_and_point_count[0][:,0]),dtype=np.int32)\n else:\n max_label = np.max(labels)\n indexes = np.array(list(range(1,max_label+1)),dtype=np.int32)\n else:\n indexes = np.array(indexes,dtype=np.int32)\n if indexes.shape[0] == 0:\n return np.zeros((0,2)),np.zeros((0,))\n\n if hull_and_point_count is None:\n hull, point_count = convex_hull(labels, indexes)\n else:\n hull, point_count = hull_and_point_count\n centers = np.zeros((len(indexes),2))\n radii = np.zeros((len(indexes),))\n #\n # point_index is the index to the first point in \"hull\" for a label\n #\n point_index = np.zeros((indexes.shape[0],),int)\n point_index[1:] = np.cumsum(point_count[:-1]) \n #########################################################################\n #\n # The algorithm is this:\n # * Choose a line S from S0 to S1 at 
random from the set of adjacent\n # S0 and S1\n # * For every vertex (V) other than S, compute the angle from S0\n # to V to S. If this angle is obtuse, the vertex V lies within the\n # minimum enclosing circle and can be ignored.\n # * Find the minimum angle for all V.\n # If the minimum angle is obtuse, stop and accept S as the diameter of \n # the circle.\n # * If the vertex with the minimum angle makes angles S0-S1-V and\n # S1-S0-V that are acute and right, then take S0, S1 and V as the\n # triangle within the circumscribed minimum enclosing circle.\n # * Otherwise, find the largest obtuse angle among S0-S1-V and\n # S1-S0-V (V is the vertex with the minimum angle, not all of them).\n # If S0-S1-V is obtuse, make V the new S1, otherwise make V the new S0\n #\n ##########################################################################\n #\n # anti_indexes is used to transform a label # into an index in the above array\n # anti_indexes_per_point gives the label index of any vertex\n #\n anti_indexes=np.zeros((np.max(indexes)+1,),int)\n anti_indexes[indexes] = list(range(indexes.shape[0]))\n anti_indexes_per_point = anti_indexes[hull[:,0]]\n #\n # Start out by eliminating the degenerate cases: 0, 1 and 2\n #\n centers[point_count==0,:]= np.NaN\n if np.all(point_count == 0):\n # Bail if there are no points in any hull to prevent\n # index failures below.\n return centers,radii\n \n centers[point_count==1,:]=hull[point_index[point_count==1],1:]\n radii[point_count < 2]=0\n centers[point_count==2,:]=(hull[point_index[point_count==2],1:]+\n hull[point_index[point_count==2]+1,1:])/2\n distance = centers[point_count==2,:] - hull[point_index[point_count==2],1:]\n radii[point_count==2]=np.sqrt(distance[:,0]**2+distance[:,1]**2)\n #\n # Get rid of the degenerate points\n #\n keep_me = point_count > 2\n #\n # Pick S0 as the first point in each label\n # and S1 as the second.\n #\n s0_idx = point_index.copy()\n s1_idx = s0_idx+1\n #\n # number each of the points in a label 
with an index # which gives\n # the order in which we'll get their angles. We use this to pick out\n # points # 2 to N which are the candidate vertices to S\n # \n within_label_indexes = (np.array(list(range(hull.shape[0])),int) -\n point_index[anti_indexes_per_point])\n \n while(np.any(keep_me)):\n #############################################################\n # Label indexing for active labels\n #############################################################\n #\n # labels_to_consider contains the labels of the objects which\n # have not been completed\n #\n labels_to_consider = indexes[keep_me]\n #\n # anti_indexes_to_consider gives the index into any vector\n # shaped similarly to labels_to_consider (for instance, min_angle\n # below) for every label in labels_to_consider.\n #\n anti_indexes_to_consider =\\\n np.zeros((np.max(labels_to_consider)+1,),int)\n anti_indexes_to_consider[labels_to_consider] = \\\n np.array(list(range(labels_to_consider.shape[0])))\n ##############################################################\n # Vertex indexing for active vertexes other than S0 and S1\n ##############################################################\n #\n # The vertices are hull-points with indexes of 2 or more\n # keep_me_vertices is a mask of the vertices to operate on\n # during this iteration\n #\n keep_me_vertices = np.logical_and(keep_me[anti_indexes_per_point],\n within_label_indexes >= 2)\n #\n # v is the vertex coordinates for each vertex considered\n #\n v = hull[keep_me_vertices,1:]\n #\n # v_labels is the label from the label matrix for each vertex\n #\n v_labels = hull[keep_me_vertices,0]\n #\n # v_indexes is the index into \"hull\" for each vertex (and similarly\n # shaped vectors such as within_label_indexes\n #\n v_indexes=np.argwhere(keep_me_vertices).flatten().astype(np.int32)\n #\n # anti_indexes_per_vertex gives the index into \"indexes\" and\n # any similarly shaped array of per-label values\n # (for instance s0_idx) for each vertex being 
considered\n #\n anti_indexes_per_vertex = anti_indexes_per_point[keep_me_vertices]\n #\n # anti_indexes_to_consider_per_vertex gives the index into any\n # vector shaped similarly to labels_to_consider for each\n # vertex being analyzed\n #\n anti_indexes_to_consider_per_vertex = anti_indexes_to_consider[v_labels]\n #\n # Get S0 and S1 per vertex\n #\n s0 = hull[s0_idx[keep_me],1:]\n s1 = hull[s1_idx[keep_me],1:]\n s0 = s0[anti_indexes_to_consider_per_vertex]\n s1 = s1[anti_indexes_to_consider_per_vertex]\n #\n # Compute the angle S0-S1-V\n #\n # the first vector of the angles is between S0 and S1\n #\n s01 = (s0 - s1).astype(float)\n #\n # compute V-S1 and V-S0 at each of the vertices to be considered\n #\n vs0 = (v - s0).astype(float)\n vs1 = (v - s1).astype(float) \n #\n #` Take the dot product of s01 and vs1 divided by the length of s01 *\n # the length of vs1. This gives the cosine of the angle between.\n #\n dot_vs1s0 = (np.sum(s01*vs1,1) /\n np.sqrt(np.sum(s01**2,1)*np.sum(vs1**2,1)))\n angle_vs1s0 = np.abs(np.arccos(dot_vs1s0))\n s10 = -s01\n dot_vs0s1 = (np.sum(s10*vs0,1) /\n np.sqrt(np.sum(s01**2,1)*np.sum(vs0**2,1)))\n angle_vs0s1 = np.abs(np.arccos(dot_vs0s1))\n #\n # S0-V-S1 is pi - the other two\n #\n angle_s0vs1 = np.pi - angle_vs1s0 - angle_vs0s1\n assert np.all(angle_s0vs1 >= 0)\n #\n # Now we find the minimum angle per label\n #\n min_angle = scind.minimum(angle_s0vs1,v_labels,\n labels_to_consider)\n min_angle = fixup_scipy_ndimage_result(min_angle)\n min_angle_per_vertex = min_angle[anti_indexes_to_consider_per_vertex]\n #\n # Calculate the index into V of the minimum angle per label.\n # Use \"indexes\" instead of labels_to_consider so we get something\n # with the same shape as keep_me\n #\n min_position = scind.minimum_position(angle_s0vs1,v_labels,\n indexes)\n min_position = fixup_scipy_ndimage_result(min_position).astype(int)\n min_position = min_position.flatten()\n #\n # Case 1: minimum angle is obtuse or right. 
Accept S as the diameter.\n # Case 1a: there are no vertices. Accept S as the diameter.\n #\n vertex_counts = scind.sum(keep_me_vertices,\n hull[:,0],\n labels_to_consider)\n vertex_counts = fixup_scipy_ndimage_result(vertex_counts)\n case_1 = np.logical_or(min_angle >= np.pi / 2,\n vertex_counts == 0)\n \n if np.any(case_1):\n # convert from a boolean over indexes_to_consider to a boolean\n # over indexes\n finish_me = np.zeros((indexes.shape[0],),bool)\n finish_me[anti_indexes[labels_to_consider[case_1]]] = True\n s0_finish_me = hull[s0_idx[finish_me],1:].astype(float)\n s1_finish_me = hull[s1_idx[finish_me],1:].astype(float)\n centers[finish_me] = (s0_finish_me + s1_finish_me)/2\n radii[finish_me] = np.sqrt(np.sum((s0_finish_me - \n s1_finish_me)**2,1))/2\n keep_me[finish_me] = False\n #\n # Case 2: all angles for the minimum angle vertex are acute \n # or right.\n # Pick S0, S1 and the vertex with the\n # smallest angle as 3 points on the circle. If you look at the\n # geometry, the diameter is the length of S0-S1 divided by\n # the cosine of 1/2 of the angle. 
The center of the circle\n # is at the circumcenter of the triangle formed by S0, S1 and\n # V.\n case_2 = keep_me.copy()\n case_2[angle_vs1s0[min_position] > np.pi/2] = False\n case_2[angle_vs0s1[min_position] > np.pi/2] = False\n case_2[angle_s0vs1[min_position] > np.pi/2] = False\n \n if np.any(case_2):\n #\n # Wikipedia (http://en.wikipedia.org/wiki/Circumcircle#Cartesian_coordinates)\n # gives the following:\n # D = 2(S0y Vx + S1y S0x - S1y Vx - S0y S1x - S0x Vy + S1x Vy)\n # D = 2(S0x (S1y-Vy) + S1x(Vy-S0y) + Vx(S0y-S1y)\n # x = ((S0x**2+S0y**2)(S1y-Vy)+(S1x**2+S1y**2)(Vy-S0y)+(Vx**2+Vy**2)(S0y-S1y)) / D\n # y = ((S0x**2+S0y**2)(Vx-S1x)+(S1x**2+S1y**2)(S0y-Vy)+(Vx**2+Vy**2)(S1y-S0y)) / D\n #\n ss0 = hull[s0_idx[case_2],1:].astype(float)\n ss1 = hull[s1_idx[case_2],1:].astype(float)\n vv = v[min_position[case_2]].astype(float)\n Y = 0\n X = 1 \n D = 2*(ss0[:,X] * (ss1[:,Y] - vv[:,Y]) +\n ss1[:,X] * (vv[:,Y] - ss0[:,Y]) +\n vv[:,X] * (ss0[:,Y] - ss1[:,Y]))\n x = (np.sum(ss0**2,1)*(ss1[:,Y] - vv[:,Y]) +\n np.sum(ss1**2,1)*(vv[:,Y] - ss0[:,Y]) +\n np.sum(vv**2,1) *(ss0[:,Y] - ss1[:,Y])) / D\n y = (np.sum(ss0**2,1)*(vv[:,X] - ss1[:,X]) +\n np.sum(ss1**2,1)*(ss0[:,X] - vv[:,X]) +\n np.sum(vv**2,1) *(ss1[:,X] - ss0[:,X])) / D\n centers[case_2,X] = x\n centers[case_2,Y] = y\n distances = ss0-centers[case_2]\n radii[case_2] = np.sqrt(np.sum(distances**2,1))\n keep_me[case_2] = False\n #\n # Finally, for anybody who's left, for each of S0-S1-V and\n # S1-S0-V, for V, the vertex with the minimum angle,\n # find the largest obtuse angle. The vertex of this\n # angle (S0 or S1) is inside the enclosing circle, so take V\n # and either S1 or S0 as the new S.\n #\n # This involves a relabeling of within_label_indexes. 
We replace\n # either S0 or S1 with V and assign V either 0 or 1\n #\n if np.any(keep_me):\n labels_to_consider = indexes[keep_me]\n indexes_to_consider = anti_indexes[labels_to_consider]\n #\n # Index into within_label_indexes for each V with the\n # smallest angle\n #\n v_obtuse_indexes = v_indexes[min_position[keep_me]]\n angle_vs0s1_to_consider = angle_vs0s1[min_position[keep_me]]\n angle_vs1s0_to_consider = angle_vs1s0[min_position[keep_me]]\n #\n # Do the cases where S0 is larger\n #\n s0_is_obtuse = angle_vs0s1_to_consider > np.pi/2\n if np.any(s0_is_obtuse):\n #\n # The index of the obtuse S0\n #\n v_obtuse_s0_indexes = v_obtuse_indexes[s0_is_obtuse]\n obtuse_s0_idx = s0_idx[indexes_to_consider[s0_is_obtuse]]\n #\n # S0 gets the within_label_index of the vertex\n #\n within_label_indexes[obtuse_s0_idx] = \\\n within_label_indexes[v_obtuse_s0_indexes]\n #\n # Assign V as the new S0\n #\n s0_idx[indexes_to_consider[s0_is_obtuse]] = v_obtuse_s0_indexes\n within_label_indexes[v_obtuse_s0_indexes] = 0\n #\n # Do the cases where S1 is larger\n #\n s1_is_obtuse = np.logical_not(s0_is_obtuse)\n if np.any(s1_is_obtuse):\n #\n # The index of the obtuse S1\n #\n v_obtuse_s1_indexes = v_obtuse_indexes[s1_is_obtuse]\n obtuse_s1_idx = s1_idx[indexes_to_consider[s1_is_obtuse]]\n #\n # S1 gets V's within_label_index and goes onto the list\n # of considered vertices.\n #\n within_label_indexes[obtuse_s1_idx] = \\\n within_label_indexes[v_obtuse_s1_indexes]\n #\n # Assign V as the new S1\n #\n s1_idx[indexes_to_consider[s1_is_obtuse]] = v_obtuse_s1_indexes\n within_label_indexes[v_obtuse_s1_indexes] = 1\n return centers, radii", "response": "This function returns the location of the minimum enclosing circle and its radius"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef associate_by_distance(labels_a, labels_b, distance):\n '''Find the objects that are within a given distance of each other\n \n Given two 
labels matrices and a distance, find pairs of objects that\n are within the given distance of each other where the distance is\n the minimum distance between any point in the convex hull of the\n two objects.\n \n labels_a - first labels matrix\n labels_b - second labels matrix\n distance - distance to measure\n \n returns a n x 2 matrix where m[x,0] is the label number in labels1 and\n m[x,1] is the label number in labels2\n \n Algorithm for computing distance between convex polygons taken from\n Chin, \"Optimal Algorithms for the Intersection and the Minimum Distance \n Problems Between Planar Polygons\", IEEE Transactions on Computers, \n vol. C-32, # 12, December 1983\n '''\n if np.max(labels_a) == 0 or np.max(labels_b) == 0:\n return np.zeros((0,2),int)\n \n hull_a, point_counts_a = convex_hull(labels_a)\n hull_b, point_counts_b = convex_hull(labels_b)\n centers_a, radii_a = minimum_enclosing_circle(\n labels_a, hull_and_point_count = (hull_a, point_counts_a))\n centers_b, radii_b = minimum_enclosing_circle(\n labels_b, hull_and_point_count = (hull_b, point_counts_b))\n #\n # Make an indexer into the hull tables\n #\n indexer_a = np.cumsum(point_counts_a)\n indexer_a[1:] = indexer_a[:-1]\n indexer_a[0] = 0\n indexer_b = np.cumsum(point_counts_b)\n indexer_b[1:] = indexer_b[:-1]\n indexer_b[0] = 0\n #\n # Compute the distances between minimum enclosing circles =\n # distance - radius_a - radius_b\n #\n i,j = np.mgrid[0:len(radii_a),0:len(radii_b)]\n ab_distance = np.sqrt((centers_a[i,0]-centers_b[j,0])**2 +\n (centers_a[i,1]-centers_b[j,1])**2)\n ab_distance_minus_radii = ab_distance - radii_a[i] - radii_b[j]\n # Account for roundoff error\n ab_distance_minus_radii -= np.sqrt(np.finfo(float).eps)\n #\n # Exclude from consideration ab_distance > distance and automatically\n # choose those whose centers are within the distance\n #\n ab_easy_wins = ab_distance <= distance\n ij_wins = np.dstack((hull_a[indexer_a[i[ab_easy_wins]],0], \n 
hull_b[indexer_b[j[ab_easy_wins]],0]))\n ij_wins.shape = ij_wins.shape[1:]\n \n ab_consider = (ab_distance_minus_radii <= distance) & (~ ab_easy_wins)\n ij_consider = np.dstack((i[ab_consider], j[ab_consider]))\n ij_consider.shape = ij_consider.shape[1:]\n if np.product(ij_consider.shape) == 0:\n return ij_wins\n if True:\n wins = []\n distance2 = distance**2\n for ii,jj in ij_consider:\n a = hull_a[indexer_a[ii]:indexer_a[ii]+point_counts_a[ii],1:]\n b = hull_b[indexer_b[jj]:indexer_b[jj]+point_counts_b[jj],1:]\n d = minimum_distance2(a,centers_a[ii,:],\n b,centers_b[jj,:])\n if d <= distance2:\n wins.append((hull_a[indexer_a[ii],0],\n hull_b[indexer_b[jj],0]))\n ij_wins = np.vstack((ij_wins, np.array(wins)))\n return ij_wins\n else:\n #\n # For each point in the hull, get the next point mod # of points in hull\n #\n hull_next_a = np.arange(hull_a.shape[0])+1\n hull_next_a[indexer_a+point_counts_a-1] = indexer_a\n hull_next_b = np.arange(hull_b.shape[0])+1\n hull_next_b[indexer_b+point_counts_b-1] = indexer_b\n #\n # Parallelize the algorithm for overlap\n #\n # For each pair of points i, i+1 mod n in the hull, and the test point t\n # the cross product of the vector from i to i+1 and the vector from i+1\n # to t should have the same sign.\n #\n next_b = hull_b[hull_next_b,1:]\n vector_b = hull_b[:,1:] - next_b\n #\n # For each i,j, we have to compare the centers_a against point_counts_b[j]\n # crosses.\n #\n b_len = point_counts_b[ij_consider[:,1]]\n b_index = np.cumsum(point_counts_b)\n b_elems = b_index[-1]\n b_index[1:] = b_index[:-1]\n b_index[0] = 0\n #\n # First create a vector that's b_elems long and every element contains an\n # index into the ij_consider vector. 
How we do this:\n # 1) mark the first element at a particular index by 1, all others = 0\n # 2) Erase the first 1\n # 3) Take the cumulative sum which will increment to 1 when it hits the\n # first 1, again when it hits the second...etc.\n #\n b_indexer = np.zeros(b_elems, int)\n b_indexer[b_index[1:]] = 1\n b_indexer = np.cumsum(b_indexer)\n #\n # The sub-index is the index from 1 to n for each of the vertices\n # per b convex hull\n #\n b_sub_index = np.arange(b_elems) - b_index[b_indexer]\n #\n # For each element of b_indexer, get the i and j at that index\n #\n b_i = ij_consider[b_indexer,0]\n b_j = ij_consider[b_indexer,1]\n #\n # Compute the cross-products now\n #\n b_vector_b = vector_b[indexer_b[b_j]+b_sub_index,:]\n b_center_vector = (next_b[indexer_b[b_j]+b_sub_index,:] - \n centers_a[indexer_a[b_i]])\n cross = (b_vector_b[:,0] * b_center_vector[:,1] -\n b_vector_b[:,1] * b_center_vector[:,0])\n hits = (all_true(cross > 0, b_index) | all_true(cross < 0, b_index))\n \n ij_wins = np.vstack((ij_wins, ij_consider[hits,:]))\n ij_consider = ij_consider[~hits,:]\n if ij_consider.shape[0] == 0:\n return ij_wins", "response": "Given two sets of labels and a distance find the pairs of objects that are within a given distance of each other."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef minimum_distance2(hull_a, center_a, hull_b, center_b):\n '''Return the minimum distance or 0 if overlap between 2 convex hulls\n \n hull_a - list of points in clockwise direction\n center_a - a point within the hull\n hull_b - list of points in clockwise direction\n center_b - a point within the hull\n '''\n if hull_a.shape[0] < 3 or hull_b.shape[0] < 3:\n return slow_minimum_distance2(hull_a, hull_b)\n else:\n return faster_minimum_distance2(hull_a, center_a, hull_b, center_b)", "response": "Return the minimum distance between two convex hulls"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script 
for\ndoing the minimum distance by exhaustive examination of all points in a hull and a hull b.", "response": "def slow_minimum_distance2(hull_a, hull_b):\n '''Do the minimum distance by exhaustive examination of all points'''\n d2_min = np.iinfo(int).max\n for a in hull_a:\n if within_hull(a, hull_b):\n return 0\n for b in hull_b:\n if within_hull(b, hull_a):\n return 0\n for pt_a in hull_a:\n for pt_b in hull_b:\n d2_min = min(d2_min, np.sum((pt_a - pt_b)**2))\n \n for h1, h2 in ((hull_a, hull_b), (hull_b, hull_a)):\n # Find the distance from a vertex in h1 to an edge in h2\n for pt1 in h1:\n prev_pt2 = h2[-1,:]\n for pt2 in h2:\n if (np.dot(pt2-prev_pt2,pt1-prev_pt2) > 0 and\n np.dot(prev_pt2-pt2,pt1-pt2) > 0):\n # points form an acute triangle, so edge is closer\n # than vertices\n d2_min = min(d2_min, distance2_to_line(pt1, prev_pt2, pt2))\n prev_pt2 = pt2\n return d2_min"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef faster_minimum_distance2(hull_a, center_a, hull_b, center_b):\n '''Do the minimum distance using the bimodal property of hull ordering\n \n '''\n #\n # Find the farthest vertex in b from some point within A. Find the\n # vertices within A visible from this point in B. 
If the point in A\n # is within B or the farthest vertex in B is within A, then the objects\n # intersect.\n #\n if within_hull(center_a, hull_b):\n return 0\n farthest_b = find_farthest(center_a, hull_b)\n if within_hull(hull_b[farthest_b,:], hull_a):\n return 0\n visible_b = find_visible(hull_b, center_a, farthest_b)\n #\n # Do the same for B\n if within_hull(center_b, hull_a):\n return 0\n farthest_a = find_farthest(center_b, hull_a)\n if within_hull(hull_a[farthest_a,:], hull_b):\n return 0\n visible_a = find_visible(hull_a, center_b, farthest_a)\n #\n # Now go from the first in A and last in B measuring distances\n # which should decrease as we move toward the best\n #\n i = visible_a[0]\n i_next = (i+1) % hull_a.shape[0]\n j = visible_b[1]\n j_next = (j+hull_b.shape[0]-1) % hull_b.shape[0]\n a = hull_a[i,:]\n a_next = hull_a[i_next,:]\n b = hull_b[j,:]\n b_next = hull_b[j_next,:]\n d2_min = np.sum((a-b)**2)\n \n while i != visible_a[1] and j != visible_b[0]:\n if lines_intersect(a, a_next, b, b_next):\n return 0\n if (np.dot(b-b_next,a-b_next) > 0 and\n np.dot(b_next-b,a-b) > 0):\n # do the edge if better than the vertex\n d2a = distance2_to_line(b, a, a_next)\n else:\n # try the next vertex of a\n d2a = np.sum((a_next-b)**2)\n if (np.dot(a-a_next,b-a_next) > 0 and\n np.dot(a_next-a,b-a) > 0):\n d2b = distance2_to_line(a, b, b_next)\n else:\n d2b = np.sum((b_next-a)**2)\n if d2a < d2_min and d2a < d2b:\n # The edge of A is closer than the best or the b-edge\n # Take it and advance A\n d2_min = d2a\n a = a_next\n i = i_next\n i_next = (i+1) % hull_a.shape[0]\n a_next = hull_a[i_next,:]\n elif d2b < d2_min:\n # B is better. Take it and advance\n d2_min = d2b\n b = b_next\n j = j_next\n j_next = (j+hull_b.shape[0]-1) % hull_b.shape[0]\n b_next = hull_b[j_next,:]\n else:\n return d2_min\n #\n # Some more to do... 
either one more i or one more j\n #\n while i != visible_a[1]:\n d2_min = min(d2_min, np.sum((a_next-b)**2))\n a = a_next\n i = i_next\n i_next = (i+1) % hull_a.shape[0]\n a_next = hull_a[i_next,:]\n \n while j != visible_b[0]:\n d2_min = min(d2_min, np.sum((b_next-a)**2))\n b = b_next\n j = j_next\n j_next = (j+ hull_b.shape[0]-1) % hull_b.shape[0]\n b_next = hull_b[j_next,:]\n return d2_min", "response": "Do the minimum distance using the bimodal property of hull ordering\n \n "} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef find_farthest(point, hull):\n '''Find the vertex in hull farthest away from a point'''\n d_start = np.sum((point-hull[0,:])**2)\n d_end = np.sum((point-hull[-1,:])**2)\n if d_start > d_end:\n # Go in the forward direction\n i = 1\n inc = 1\n term = hull.shape[0]\n d2_max = d_start\n else:\n # Go in the reverse direction\n i = hull.shape[0]-2\n inc = -1\n term = -1\n d2_max = d_end\n while i != term:\n d2 = np.sum((point - hull[i,:])**2)\n if d2 < d2_max:\n break\n i += inc\n d2_max = d2\n return i-inc", "response": "Find the vertex in hull farthest away from a point"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngiving an observer location find the first and last visible AttributeNames that are in the hull and the observer at the observer.", "response": "def find_visible(hull, observer, background):\n '''Given an observer location, find the first and last visible\n points in the hull\n \n The observer at \"observer\" is looking at the hull whose most distant\n vertex from the observer is \"background. 
Find the vertices that are\n the furthest distance from the line between observer and background.\n These will be the start and ends in the vertex chain of vertices\n visible by the observer.\n '''\n pt_background = hull[background,:]\n vector = pt_background - observer\n i = background\n dmax = 0\n while True:\n i_next = (i+1) % hull.shape[0]\n pt_next = hull[i_next,:]\n d = -np.cross(vector, pt_next-pt_background)\n if d < dmax or i_next == background:\n i_min = i\n break\n dmax = d\n i = i_next\n dmax = 0\n i = background\n while True:\n i_next = (i+hull.shape[0]-1) % hull.shape[0]\n pt_next = hull[i_next,:]\n d = np.cross(vector, pt_next-pt_background)\n if d < dmax or i_next == background:\n i_max = i\n break\n dmax = d\n i = i_next\n return (i_min, i_max)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef distance2_to_line(pt, l0, l1):\n '''The perpendicular distance squared from a point to a line\n \n pt - point in question\n l0 - one point on the line\n l1 - another point on the line\n '''\n pt = np.atleast_1d(pt)\n l0 = np.atleast_1d(l0)\n l1 = np.atleast_1d(l1)\n reshape = pt.ndim == 1\n if reshape:\n pt.shape = l0.shape = l1.shape = (1, pt.shape[0])\n result = (((l0[:,0] - l1[:,0]) * (l0[:,1] - pt[:,1]) - \n (l0[:,0] - pt[:,0]) * (l0[:,1] - l1[:,1]))**2 /\n np.sum((l1-l0)**2, 1))\n if reshape:\n result = result[0]\n return result", "response": "The perpendicular distance squared from a point to a line."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef within_hull(point, hull):\n '''Return true if the point is within the convex hull'''\n h_prev_pt = hull[-1,:]\n for h_pt in hull:\n if np.cross(h_pt-h_prev_pt, point - h_pt) >= 0:\n return False\n h_prev_pt = h_pt\n return True", "response": "Return True if the point is within the convex hull."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write 
the documentation\ndef all_true(a, indexes):\n '''Find which vectors have all-true elements\n \n Given an array, \"a\" and indexes into the first elements of vectors\n within that array, return an array where each element is true if\n all elements of the corresponding vector are true.\n \n Example: a = [ 1,1,0,1,1,1,1], indexes=[0,3]\n vectors = [[1,1,0],[1,1,1,1]]\n return = [False, True]\n '''\n if len(indexes) == 0:\n return np.zeros(0,bool)\n elif len(indexes) == 1:\n return np.all(a)\n cs = np.zeros(len(a)+1,int)\n cs[1:] = np.cumsum(a)\n augmented_indexes = np.zeros(len(indexes)+1, int)\n augmented_indexes[0:-1] = indexes + 1\n augmented_indexes[-1] = len(a) + 1\n counts = augmented_indexes[1:]-augmented_indexes[0:-1]\n hits = cs[augmented_indexes[1:]-1] - cs[augmented_indexes[0:-1]-1]\n return counts == hits", "response": "Find which vectors have all - true elements within that array and indexes into the first elements of the corresponding vectors\n \n ."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncalculate measurements of ellipses equivalent to the second moments of labels image labels indexes are the indexes of the objects in the image.", "response": "def ellipse_from_second_moments(image, labels, indexes, wants_compactness = False):\n \"\"\"Calculate measurements of ellipses equivalent to the second moments of labels\n \n image - the intensity at each point\n labels - for each labeled object, derive an ellipse\n indexes - sequence of indexes to process\n \n returns the following arrays:\n coordinates of the center of the ellipse\n eccentricity\n major axis length\n minor axis length\n orientation\n compactness (if asked for)\n \n some definitions taken from \"Image Moments-Based Structuring and Tracking\n of Objects\", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. 
CARVALHO,\n http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf\n particularly equation 5 (which has some errors in it).\n These yield the rectangle with equivalent second moments. I translate\n to the ellipse by multiplying by 1.154701 which is Matlab's calculation\n of the major and minor axis length for a square of length X divided\n by the actual length of the side of a square of that length.\n \n eccentricity is the distance between foci divided by the major axis length\n orientation is the angle of the major axis with respect to the X axis\n compactness is the variance of the radial distribution normalized by the area\n \"\"\"\n if len(indexes) == 0:\n return (np.zeros((0,2)), np.zeros((0,)), np.zeros((0,)), \n np.zeros((0,)),np.zeros((0,)))\n i,j = np.argwhere(labels != 0).transpose()\n return ellipse_from_second_moments_ijv(i,j,image[i,j], labels[i,j], indexes, wants_compactness)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncalculate measurements of ellipses equivalent to the second moments of labels i,j - coordinates of each point image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation some definitions taken from \"Image Moments-Based Structuring and Tracking of Objects\", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. 
eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis", "response": "def ellipse_from_second_moments_ijv(i,j, image, labels, indexes, wants_compactness = False):\n \"\"\"Calculate measurements of ellipses equivalent to the second moments of labels\n \n i,j - coordinates of each point\n image - the intensity at each point\n labels - for each labeled object, derive an ellipse\n indexes - sequence of indexes to process\n \n returns the following arrays:\n coordinates of the center of the ellipse\n eccentricity\n major axis length\n minor axis length\n orientation\n \n some definitions taken from \"Image Moments-Based Structuring and Tracking\n of Objects\", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO,\n http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf\n particularly equation 5 (which has some errors in it).\n These yield the rectangle with equivalent second moments. 
I translate\n to the ellipse by multiplying by 1.154701 which is Matlab's calculation\n of the major and minor axis length for a square of length X divided\n by the actual length of the side of a square of that length.\n \n eccentricity is the distance between foci divided by the major axis length\n orientation is the angle of the major axis with respect to the X axis\n \"\"\"\n if len(indexes) == 0:\n return [np.zeros((0,2))] + [np.zeros((0,))] * (5 if wants_compactness else 4)\n if len(i) == 0:\n return ([np.zeros((len(indexes), 2)), np.ones(len(indexes))] +\n [np.zeros(len(indexes))] * (4 if wants_compactness else 3))\n #\n # Normalize to center of object for stability\n #\n nlabels = np.max(indexes)+1\n m = np.array([[None, 0, None],\n [0, None, None],\n [None, None, None]], object)\n if np.all(image == 1):\n image = 1\n m[0,0] = intensity = np.bincount(labels)\n else:\n m[0,0] = intensity = np.bincount(labels, image)\n ic = np.bincount(labels, i * image) / intensity\n jc = np.bincount(labels, j * image) / intensity\n i = i - ic[labels]\n j = j - jc[labels]\n #\n # Start by calculating the moments m[p][q] of the image\n # sum(i**p j**q)\n #\n # m[1,0] = 0 via normalization\n # m[0,1] = 0 via normalization\n m[1,1] = np.bincount(labels, i*j*image)\n m[2,0] = np.bincount(labels, i*i*image)\n m[0,2] = np.bincount(labels, j*j*image)\n \n a = m[2,0] / m[0,0]\n b = 2*m[1,1]/m[0,0]\n c = m[0,2] / m[0,0]\n \n theta = np.arctan2(b,c-a) / 2\n temp = np.sqrt(b**2+(a-c)**2)\n #\n # If you do a linear regression of the circles from 1 to 50 radius\n # in Matlab, the resultant values fit a line with slope=.9975 and\n # intercept .095. 
I'm adjusting the lengths accordingly.\n #\n mystery_constant = 0.095\n mystery_multiplier = 0.9975\n major_axis_len = (np.sqrt(8*(a+c+temp)) * mystery_multiplier +\n mystery_constant)\n minor_axis_len = (np.sqrt(8*(a+c-temp)) * mystery_multiplier +\n mystery_constant)\n eccentricity = np.sqrt(1-(minor_axis_len / major_axis_len)**2)\n compactness = 2 * np.pi * (a + c) / m[0,0]\n return ([np.column_stack((ic[indexes], jc[indexes])),\n eccentricity[indexes],\n major_axis_len[indexes],\n minor_axis_len[indexes],\n theta[indexes]] +\n ([compactness[indexes]] if wants_compactness else []))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncalculating the area of each object divided by the area of its bounding box.", "response": "def calculate_extents(labels, indexes):\n \"\"\"Return the area of each object divided by the area of its bounding box\"\"\"\n fix = fixup_scipy_ndimage_result\n areas = fix(scind.sum(np.ones(labels.shape),labels,np.array(indexes, dtype=np.int32)))\n y,x = np.mgrid[0:labels.shape[0],0:labels.shape[1]]\n xmin = fix(scind.minimum(x, labels, indexes))\n xmax = fix(scind.maximum(x, labels, indexes))\n ymin = fix(scind.minimum(y, labels, indexes))\n ymax = fix(scind.maximum(y, labels, indexes))\n bbareas = (xmax-xmin+1)*(ymax-ymin+1)\n return areas / bbareas"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncalculates the perimeter vector for the given surrounding points", "response": "def __calculate_perimeter_scoring():\n \"\"\"Return a 512 element vector which gives the perimeter given surrounding pts\n \n \"\"\"\n #\n # This is the array from the paper - a 256 - element array leaving out\n # the center point. 
The first value is the index, the second, the perimeter\n #\n prashker = np.array([ \n [0 ,4 ],[32,4 ],[64,3 ],[96 ,1.414],[128,4 ],[160,4 ],[192,1.414],[224,2.828],\n [1 ,4 ],[33,4 ],[65,3 ],[97 ,1.414],[129,4 ],[161,4 ],[193,3 ],[225,3 ],\n [2 ,3 ],[34,3 ],[66,2 ],[98 ,2 ],[130,3 ],[162,3 ],[194,2 ],[226,2 ],\n [3 ,1.414],[35,1.414],[67,2 ],[99 ,2 ],[131,3 ],[163,3 ],[195,2 ],[227,2 ],\n [4 ,4 ],[36,4 ],[68,3 ],[100,3 ],[132,4 ],[164,4 ],[196,1.414],[228,3 ],\n [5 ,4 ],[37,4 ],[69,3 ],[101,3 ],[133,4 ],[165,4 ],[197,3 ],[229,3 ],\n [6 ,1.414],[38,3 ],[70,2 ],[102,2 ],[134,1.414],[166,3 ],[198,2 ],[230,2 ],\n [7 ,2.828],[39,3 ],[71,2 ],[103,2 ],[135,3 ],[167,3 ],[199,2 ],[231,1.414],\n [8 ,3 ],[40,1.414],[72,2 ],[104,2 ],[136,3 ],[168,1.414],[200,1.414],[232,1.414],\n [9 ,1.414],[41,2.828],[73,1.414],[105,1.414],[137,3 ],[169,3 ],[201,1.414],[233,1.414],\n [10,2 ],[42,1.414],[74,1 ],[106,1 ],[138,2 ],[170,2 ],[202,1 ],[234,1.414],\n [11,2 ],[43,1.414],[75,1 ],[107,1 ],[139,2 ],[171,2 ],[203,1 ],[235,1 ],\n [12,3 ],[44,3 ],[76,2 ],[108,2 ],[140,3 ],[172,3 ],[204,2 ],[236,2 ],\n [13,1.414],[45,3 ],[77,2 ],[109,2 ],[141,3 ],[173,3 ],[205,1.414],[237,1.414],\n [14,1.414],[46,1.414],[78,1 ],[110,1 ],[142,2 ],[174,1.414],[206,2 ],[238,1 ],\n [15,1.414],[47,1.414],[79,1.414],[111,1 ],[143,2 ],[175,1.414],[207,1 ],[239,1 ],\n [16,3 ],[48,3 ],[80,2 ],[112,1.414],[144,1.414],[176,1.414],[208,2 ],[240,1.414],\n [17,3 ],[49,3 ],[81,2 ],[113,2 ],[145,3 ],[177,3 ],[209,2 ],[241,2 ],\n [18,2 ],[50,2 ],[82,1 ],[114,1 ],[146,1.414],[178,2 ],[210,1 ],[242,1.414],\n [19,1.414],[51,2 ],[83,1 ],[115,2 ],[147,1.414],[179,1.414],[211,1 ],[243,1 ],\n [20,1.414],[52,3 ],[84,1.414],[116,1.414],[148,2.828],[180,3 ],[212,1.414],[244,1.414],\n [21,1.414],[53,3 ],[85,2 ],[117,1.414],[149,3 ],[181,3 ],[213,2 ],[245,1.414],\n [22,2 ],[54,2 ],[86,1 ],[118,1 ],[150,1.414],[182,2 ],[214,1 ],[246,1 ],\n [23,1.414],[55,2 ],[87,1.414],[119,1 ],[151,1.414],[183,1.414],[215,1 ],[247,1 ],\n [24,2 ],[56,2 
],[88,1 ],[120,1 ],[152,2 ],[184,2 ],[216,1 ],[248,1 ],\n [25,2 ],[57,2 ],[89,1 ],[121,1.414],[153,2 ],[185,2 ],[217,1 ],[249,1 ],\n [26,1 ],[58,1 ],[90,0 ],[122,0 ],[154,1 ],[186,2 ],[218,0 ],[250,0 ],\n [27,1 ],[59,1.414],[91,0 ],[123,0 ],[155,1 ],[187,1 ],[219,0 ],[251,0 ],\n [28,2 ],[60,2 ],[92,1 ],[124,1 ],[156,2 ],[188,2 ],[220,1.414],[252,1 ],\n [29,2 ],[61,2 ],[93,2 ],[125,1 ],[157,2 ],[189,1.414],[221,1 ],[253,1 ],\n [30,1 ],[62,1 ],[94,0 ],[126,0 ],[158,1.414],[190,1 ],[222,0 ],[254,0 ],\n [31,1 ],[63,1 ],[95,0 ],[127,0 ],[159,1 ],[191,1 ],[223,0 ],[255,0]])\n score = np.zeros((512,))\n i = np.zeros((prashker.shape[0]),int)\n for j in range(4): # 1,2,4,8\n i = i+((prashker[:,0].astype(int) // 2**j)%2)*2**j\n i = i+16\n for j in range(4,8):\n i = i+((prashker[:,0].astype(int) // 2**j)%2)*2**(j+1)\n score[i.astype(int)] = prashker[:,1]\n return score"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncalculates the perimeters between adjacent pixels in the perimeters of the labels.", "response": "def calculate_perimeters(labels, indexes):\n \"\"\"Count the distances between adjacent pixels in the perimeters of the labels\"\"\"\n #\n # Create arrays that tell whether a pixel is like its neighbors.\n # index = 0 is the pixel -1,-1 from the pixel of interest, 1 is -1,0, etc.\n #\n m = table_idx_from_labels(labels)\n pixel_score = __perimeter_scoring[m]\n return fixup_scipy_ndimage_result(scind.sum(pixel_score, labels, np.array(indexes,dtype=np.int32)))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef table_idx_from_labels(labels):\n '''Return an array of indexes into a morphology lookup table\n labels - a labels matrix\n \n returns a matrix of values between 0 and 511 of indices appropriate\n for table_lookup where a pixel's index is determined based on whether\n or not the pixel has the same label as its neighbors (and is labeled)\n '''\n 
m=np.zeros((labels.shape[0],labels.shape[1]),int)\n exponent = 0\n for i in range(-1,2):\n ilow = (i==-1 and 1) or 0\n iend = (i==1 and labels.shape[0]-1) or labels.shape[0] \n for j in range(-1,2):\n jlow = (j==-1 and 1) or 0\n jend = (j==1 and labels.shape[1]-1) or labels.shape[1]\n #\n # Points outside of bounds are different from what's outside,\n # so set untouched points to \"different\"\n #\n mask = np.zeros(labels.shape, bool)\n mask[ilow:iend, jlow:jend] = (labels[ilow:iend,jlow:jend] == \n labels[ilow+i:iend+i,jlow+j:jend+j])\n m[mask] += 2**exponent\n exponent += 1\n return m", "response": "Return an array of indexes into a morphology lookup table\n labels - a labels matrix containing the indices of the morphology lookup table containing the pixel s label."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncalculating the area of the convex hull of each labeled object.", "response": "def calculate_convex_hull_areas(labels,indexes=None):\n \"\"\"Calulculate the area of the convex hull of each labeled object\n \n labels - a label matrix\n indexes - None: calculate convex hull area over entire image\n number: calculate convex hull for a single label\n sequence: calculate convex hull for labels matching a sequence\n member and return areas in same order.\n \"\"\"\n if getattr(indexes,\"__getitem__\",False):\n indexes = np.array(indexes,dtype=np.int32)\n elif indexes is not None:\n indexes = np.array([indexes],dtype=np.int32)\n else:\n labels = labels !=0\n indexes = np.array([1],dtype=np.int32)\n hull, counts = convex_hull(labels, indexes)\n result = np.zeros((counts.shape[0],))\n #\n # Get rid of the degenerate cases\n #\n result[counts==1] = 1 # a single point has area 1\n if not np.any(counts >1):\n return result\n #\n # Given a label number \"index_of_label\" indexes into the result\n #\n index_of_label = np.zeros((hull[:,0].max()+1),int)\n index_of_label[indexes] = np.array(list(range(indexes.shape[0])))\n #\n # hull_index 
is the index into hull of the first point on the hull\n # per label\n #\n hull_index = np.zeros((counts.shape[0],),int)\n hull_index[1:] = np.cumsum(counts[:-1])\n #\n # A 2-point case is a line. The area of a line is its length * 1\n # and its length needs to be expanded by 1 because the end-points are\n # at the limits, not the ends.\n # \n if np.any(counts==2):\n diff_2 = hull[hull_index[counts==2],1:]-hull[hull_index[counts==2]+1,1:]\n result[counts==2] = np.sqrt(np.sum(diff_2**2,1))+1\n if not np.any(counts>=3):\n return result\n #\n # Now do the non-degenerate cases (_nd)\n #\n counts_per_label = np.zeros((hull[:,0].max()+1),counts.dtype)\n counts_per_label[indexes] = counts\n hull_nd = hull[counts_per_label[hull[:,0]] >=3]\n counts_nd = counts[counts>=3]\n indexes_nd = indexes[counts>=3]\n index_of_label_nd = np.zeros((index_of_label.shape[0],),int)\n index_of_label_nd[indexes_nd] = np.array(list(range(indexes_nd.shape[0])))\n #\n # Figure out the within-label index of each point in a label. This is\n # so we can do modulo arithmetic when pairing a point with the next\n # when determining an edge\n #\n hull_index_nd = np.zeros((counts_nd.shape[0],),int)\n if hull_index_nd.shape[0] > 1:\n hull_index_nd[1:] = np.cumsum(counts_nd[:-1])\n index_of_label_per_pixel_nd = index_of_label_nd[hull_nd[:,0]]\n hull_index_per_pixel_nd = hull_index_nd[index_of_label_per_pixel_nd] \n within_label_index = (np.array(list(range(hull_nd.shape[0]))) -\n hull_index_per_pixel_nd)\n #\n # Find some point within each convex hull.\n #\n within_hull = np.zeros((counts_nd.shape[0],2))\n within_hull[:,0] = scind.sum(hull_nd[:,1],\n hull_nd[:,0],\n indexes_nd) / counts_nd\n within_hull[:,1] = scind.sum(hull_nd[:,2],\n hull_nd[:,0],\n indexes_nd) / counts_nd\n within_hull_per_pixel = within_hull[index_of_label_per_pixel_nd]\n #\n # Now, we do a little, slightly wierd fixup, arguing that the\n # edge of a pixel is +/- .5 of its coordinate. 
So we move the ones\n # left of center to the left by .5, right of center to the right by .5\n # etc.\n #\n # It works for a square...\n #\n # 2012-06-04 (thouis): Numpy trunk no longer allows unsafe type conversion\n # in in-place operations, which was what used to happen here\n # (adding/subtracting .5 from an integer array, which then just got\n # truncated). So now it just adds 1 to the pixels right or below the\n # center.\n #\n # It would probably be better to do this in one of two other ways:\n # - Compute area with a circular ferret\n # = polygon area + radius * perimeter + pi * radius^2\n # - Compute area with a square ferret\n # = polygon area + 2 * radius * (horizontal_projection + vertical_projection) + 4*radius^2\n hull_nd[hull_nd[:,1] >= within_hull_per_pixel[:,0],1] += 1\n hull_nd[hull_nd[:,2] >= within_hull_per_pixel[:,1],2] += 1\n #\n # Finally, we go around the circle, computing triangle areas\n # from point n to point n+1 (modulo count) to the point within\n # the hull.\n #\n plus_one_idx = np.array(list(range(hull_nd.shape[0])))+1\n modulo_mask = within_label_index+1 == counts_nd[index_of_label_per_pixel_nd]\n plus_one_idx[modulo_mask] = hull_index_per_pixel_nd[modulo_mask]\n area_per_pt_nd = triangle_areas(hull_nd[:,1:],\n hull_nd[plus_one_idx,1:],\n within_hull_per_pixel)\n #\n # The convex area is the sum of these triangles\n #\n result[counts>=3] = scind.sum(area_per_pt_nd, hull_nd[:,0], indexes_nd)\n return result"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncalculates the solidity of each label in the cluster.", "response": "def calculate_solidity(labels,indexes=None):\n \"\"\"Calculate the area of each label divided by the area of its convex hull\n \n labels - a label matrix\n indexes - the indexes of the labels to measure\n \"\"\"\n if indexes is not None:\n \"\"\" Convert to compat 32bit integer \"\"\"\n indexes = np.array(indexes,dtype=np.int32)\n areas = 
scind.sum(np.ones(labels.shape),labels,indexes)\n convex_hull_areas = calculate_convex_hull_areas(labels, indexes)\n return areas / convex_hull_areas"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef euler_number(labels, indexes=None):\n if indexes is None:\n labels = labels != 0\n indexes = np.array([1],dtype=np.int32)\n elif getattr(indexes,'__getitem__',False):\n indexes = np.array(indexes,dtype=np.int32)\n else:\n indexes = np.array([indexes],dtype=np.int32)\n fix = fixup_scipy_ndimage_result\n #\n # The algorithm here is from the following reference:\n # S.B. Gray, \"Local Properties of Binary Images in Two Dimensions\",\n # IEEE Transactions on Computers, Vol c-20 # 5 p 551, May 1971\n #\n # The general idea is that crossings into objects can be measured locally\n # through counting numbers of patterns resulting in crossings. There\n # are three sets that are applicable in Euler Numbers:\n # Q1: 1 0 0 1 0 0 0 0 (or more simply, 1 bit per quad)\n # 0 0 0 0 1 0 0 1\n #\n # Q3: 0 1 1 0 1 1 1 1 (or 3 bits per quad)\n # 1 1 1 1 1 0 0 1\n #\n # QD: 1 0 0 1\n # 0 1 1 0\n #\n # and the Euler number = W of an object is\n #\n # 4W = n(Q1) - n(Q3) - 2n(QD) (equation 34)\n # W = (n(Q1) - n(Q3) - 2n(QD))/4\n #\n # We shift the label matrix to make matrices, padded by zeros on the\n # sides for each of the four positions of the quad:\n # I00 I01\n # I10 I11\n # \n # We can then assign each bitquad to a label based on the value\n # of the label at one of the \"on\" bits. For example, the first pattern\n # of Q1 has the label I00 because that bit is on. 
It's truth value is\n # I00 != I01 and I00 != I02 and I00 != I03.\n #\n I_shape = (labels.shape[0]+3,labels.shape[1]+3)\n I00 = np.zeros(I_shape,int)\n I01 = np.zeros(I_shape,int)\n I10 = np.zeros(I_shape,int)\n I11 = np.zeros(I_shape,int)\n slice_00 = [slice(1,labels.shape[0]+1),slice(1,labels.shape[1]+1)]\n slice_01 = [slice(1,labels.shape[0]+1),slice(0,labels.shape[1])]\n slice_10 = [slice(labels.shape[0]),slice(1,labels.shape[1]+1)]\n slice_11 = [slice(0,labels.shape[0]),slice(0,labels.shape[1])]\n I00[slice_00] = labels\n I01[slice_01] = labels\n I10[slice_10] = labels\n I11[slice_11] = labels\n #\n # There are 6 binary comparisons among the four bits\n #\n EQ00_01 = I00 == I01; EQ01_00 = EQ00_01\n EQ00_10 = I00 == I10; EQ10_00 = EQ00_10\n EQ00_11 = I00 == I11; EQ11_00 = EQ00_11\n EQ01_10 = I01 == I10; EQ10_01 = EQ01_10\n EQ01_11 = I01 == I11; EQ11_01 = EQ01_11\n EQ10_11 = I10 == I11; EQ11_10 = EQ10_11\n NE00_01 = np.logical_not(EQ00_01); NE01_00 = NE00_01\n NE00_10 = np.logical_not(EQ00_10); NE10_00 = NE00_10\n NE00_11 = np.logical_not(EQ00_11); NE11_00 = NE00_11\n NE01_10 = np.logical_not(EQ01_10); NE10_01 = NE01_10\n NE01_11 = np.logical_not(EQ01_11); NE11_01 = NE01_11\n NE10_11 = np.logical_not(EQ10_11); NE11_10 = NE10_11\n #\n # Q1: 1 0 \n # 0 0\n Q1_condition = (NE00_01 & NE00_10 & NE00_11).astype(int)\n # 0 1\n # 0 0\n Q1_condition[slice_00] += (NE01_00 & NE01_10 & NE01_11)[slice_01]\n # 0 0\n # 1 0\n Q1_condition[slice_00] += (NE10_00 & NE10_01 & NE10_11)[slice_10]\n # 0 0\n # 0 1\n Q1_condition[slice_00] += (NE11_00 & NE11_01 & NE11_10)[slice_11]\n Q1 = fix(scind.sum(Q1_condition, I00, indexes))\n #\n # Q3: 1 1\n # 1 0\n Q3_condition = (EQ00_10 & EQ00_01 & NE00_11).astype(int)\n # 0 1\n # 1 1\n Q3_condition[slice_00] += (NE11_00 & EQ11_10 & EQ11_01)[slice_11]\n # 1 0\n # 1 1\n Q3_condition += (NE00_01 & EQ00_10 & EQ00_11)\n # 1 1\n # 0 1\n Q3_condition += (NE00_10 & EQ00_01 & EQ00_11)\n Q3 = fix(scind.sum(Q3_condition, I00, indexes))\n # QD: 1 0\n # 0 
1\n QD_condition = (NE00_01 & NE00_10 & EQ00_11).astype(int)\n # 0 1\n # 1 0\n QD_condition[slice_00] += (NE01_00 & NE01_11 & EQ01_10)[slice_01]\n QD = fix(scind.sum(QD_condition, I00, indexes))\n W = (Q1 - Q3 - 2*QD).astype(float)/4.0\n if indexes is None:\n return W[0]\n return W", "response": "Calculate the Euler number of each label in the binary image."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef block(shape, block_shape):\n shape = np.array(shape)\n block_shape = np.array(block_shape)\n i,j = np.mgrid[0:shape[0],0:shape[1]]\n ijmax = (shape.astype(float)/block_shape.astype(float)).astype(int)\n ijmax = np.maximum(ijmax, 1)\n multiplier = ijmax.astype(float) / shape.astype(float)\n i = (i * multiplier[0]).astype(int)\n j = (j * multiplier[1]).astype(int)\n labels = i * ijmax[1] + j\n indexes = np.array(list(range(np.product(ijmax))))\n return labels, indexes", "response": "Create a labels image that divides the image into blocks and returns the indexes of all labels generated by block - process the image"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef white_tophat(image, radius=None, mask=None, footprint=None):\n '''White tophat filter an image using a circular structuring element\n \n image - image in question\n radius - radius of the circular structuring element. If no radius, use\n an 8-connected structuring element.\n mask - mask of significant pixels in the image. 
Points outside of\n the mask will not participate in the morphological operations\n '''\n #\n # Subtract the opening to get the tophat\n #\n final_image = image - opening(image, radius, mask, footprint) \n #\n # Paint the masked pixels into the final image\n #\n if not mask is None:\n not_mask = np.logical_not(mask)\n final_image[not_mask] = image[not_mask]\n return final_image", "response": "White tophat filter an image using a circular structuring element."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nblack tophat filter an image using a circular structuring element.", "response": "def black_tophat(image, radius=None, mask=None, footprint=None):\n '''Black tophat filter an image using a circular structuring element\n \n image - image in question\n radius - radius of the circular structuring element. If no radius, use\n an 8-connected structuring element.\n mask - mask of significant pixels in the image. Points outside of\n the mask will not participate in the morphological operations\n '''\n #\n # Subtract the image from the closing to get the bothat\n #\n final_image = closing(image, radius, mask, footprint) - image \n #\n # Paint the masked pixels into the final image\n #\n if not mask is None:\n not_mask = np.logical_not(mask)\n final_image[not_mask] = image[not_mask]\n return final_image"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef grey_erosion(image, radius=None, mask=None, footprint=None):\n '''Perform a grey erosion with masking'''\n if footprint is None:\n if radius is None:\n footprint = np.ones((3,3),bool)\n radius = 1\n else:\n footprint = strel_disk(radius)==1\n else:\n radius = max(1, np.max(np.array(footprint.shape) // 2))\n iradius = int(np.ceil(radius))\n #\n # Do a grey_erosion with masked pixels = 1 so they don't participate\n #\n big_image = np.ones(np.array(image.shape)+iradius*2)\n big_image[iradius:-iradius,iradius:-iradius] = image\n if 
not mask is None:\n not_mask = np.logical_not(mask)\n big_image[iradius:-iradius,iradius:-iradius][not_mask] = 1\n processed_image = scind.grey_erosion(big_image, footprint=footprint)\n final_image = processed_image[iradius:-iradius,iradius:-iradius]\n if not mask is None:\n final_image[not_mask] = image[not_mask]\n return final_image", "response": "Perform a grey erosion with masking"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef grey_reconstruction(image, mask, footprint=None, offset=None):\n '''Perform a morphological reconstruction of the image\n \n grey_dilate the image, constraining each pixel to have a value that is\n at most that of the mask.\n image - the seed image\n mask - the mask, giving the maximum allowed value at each point\n footprint - a boolean array giving the neighborhood pixels to be used\n in the dilation. None = 8-connected\n \n The algorithm is taken from:\n Robinson, \"Efficient morphological reconstruction: a downhill filter\",\n Pattern Recognition Letters 25 (2004) 1759-1767\n '''\n assert tuple(image.shape) == tuple(mask.shape)\n assert np.all(image <= mask)\n if footprint is None:\n footprint = np.ones([3]*image.ndim, bool)\n else:\n footprint = footprint.copy()\n \n if offset is None:\n assert all([d % 2 == 1 for d in footprint.shape]),\\\n \"Footprint dimensions must all be odd\"\n offset = np.array([d//2 for d in footprint.shape])\n # Cross out the center of the footprint\n footprint[[slice(d,d+1) for d in offset]] = False\n #\n # Construct an array that's padded on the edges so we can ignore boundaries\n # The array is a dstack of the image and the mask; this lets us interleave\n # image and mask pixels when sorting which makes list manipulations easier\n #\n padding = (np.array(footprint.shape)//2).astype(int)\n dims = np.zeros(image.ndim+1,int)\n dims[1:] = np.array(image.shape)+2*padding\n dims[0] = 2\n inside_slices = [slice(p,-p) for p in padding]\n values = 
np.ones(dims)*np.min(image)\n values[[0]+inside_slices] = image\n values[[1]+inside_slices] = mask\n #\n # Create a list of strides across the array to get the neighbors\n # within a flattened array\n #\n value_stride = np.array(values.strides[1:]) // values.dtype.itemsize\n image_stride = values.strides[0] // values.dtype.itemsize\n footprint_mgrid = np.mgrid[[slice(-o,d - o) \n for d,o in zip(footprint.shape,offset)]]\n footprint_offsets = footprint_mgrid[:,footprint].transpose()\n strides = np.array([np.sum(value_stride * footprint_offset)\n for footprint_offset in footprint_offsets],\n np.int32)\n values = values.flatten()\n value_sort = np.lexsort([-values]).astype(np.int32)\n #\n # Make a linked list of pixels sorted by value. -1 is the list terminator.\n #\n prev = -np.ones(len(values), np.int32)\n next = -np.ones(len(values), np.int32)\n prev[value_sort[1:]] = value_sort[:-1]\n next[value_sort[:-1]] = value_sort[1:]\n #\n # Create a rank-order value array so that the Cython inner-loop\n # can operate on a uniform data type\n #\n values, value_map = rank_order(values)\n current = value_sort[0]\n slow = False\n \n if slow:\n while current != -1:\n if current < image_stride:\n current_value = values[current]\n if current_value == 0:\n break\n neighbors = strides+current\n for neighbor in neighbors:\n if neighbor < 0:\n raise IndexError(\"Index out of bounds: %d, current=%d, current_value=%d\"%\n (neighbor,\n np.unravel_index(current, dims),\n current_value))\n neighbor_value = values[neighbor]\n # Only do neighbors less than the current value\n if neighbor_value < current_value:\n mask_value = values[neighbor + image_stride]\n # Only do neighbors less than the mask value\n if neighbor_value < mask_value:\n # Raise the neighbor to the mask value if\n # the mask is less than current\n if mask_value < current_value:\n link = neighbor + image_stride\n values[neighbor] = mask_value\n else:\n link = current\n values[neighbor] = current_value\n # unlink the 
neighbor\n nprev = prev[neighbor]\n nnext = next[neighbor]\n next[nprev] = nnext\n if nnext != -1:\n prev[nnext] = nprev\n # link the neighbor after the link\n next[neighbor] = next[link]\n prev[neighbor] = link\n prev[next[link]] = neighbor\n next[link] = neighbor\n current = next[current]\n else:\n grey_reconstruction_loop(values, prev, next, strides, current, \n image_stride)\n #\n # Reshape the values array to the shape of the padded image\n # and return the unpadded portion of that result\n #\n values = value_map[values[:image_stride]]\n values.shape = np.array(image.shape)+2*padding\n return values[inside_slices]", "response": "Perform a morphological reconstruction of the image using the greyscale algorithm."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndo a morphological opening of a structureuring element.", "response": "def opening(image, radius=None, mask=None, footprint=None):\n '''Do a morphological opening\n \n image - pixel image to operate on\n radius - use a structuring element with the given radius. If no radius,\n use an 8-connected structuring element.\n mask - if present, only use unmasked pixels for operations\n '''\n eroded_image = grey_erosion(image, radius, mask, footprint)\n return grey_dilation(eroded_image, radius, mask, footprint)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef closing(image, radius=None, mask=None, footprint = None):\n '''Do a morphological closing\n \n image - pixel image to operate on\n radius - use a structuring element with the given radius. 
If no structuring\n element, use an 8-connected structuring element.\n mask - if present, only use unmasked pixels for operations\n '''\n dilated_image = grey_dilation(image, radius, mask, footprint)\n return grey_erosion(dilated_image, radius, mask, footprint)", "response": "Do a morphological closing\n \n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nperforming morphological opening along lines of different angles.", "response": "def openlines(image, linelength=10, dAngle=10, mask=None):\n \"\"\"\n Do a morphological opening along lines of different angles.\n\n Return difference between max and min response to different angles for each pixel.\n This effectively removes dots and only keeps lines.\n\n image - pixel image to operate on\n length - length of the structural element\n angluar_resolution - angle step for the rotating lines\n mask - if present, only use unmasked pixels for operations\n \"\"\"\n nAngles = 180//dAngle\n openingstack = np.zeros((nAngles,image.shape[0],image.shape[1]),image.dtype)\n\n for iAngle in range(nAngles):\n angle = dAngle * iAngle\n se = strel_line(linelength,angle)\n openingstack[iAngle,:,:] = opening(image, mask=mask, footprint=se)\n\n imLines = np.max(openingstack,axis=0) - np.min(openingstack,axis=0)\n return imLines"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef table_lookup(image, table, border_value, iterations = None):\n '''Perform a morphological transform on an image, directed by its neighbors\n \n image - a binary image\n table - a 512-element table giving the transform of each pixel given\n the values of that pixel and its 8-connected neighbors.\n border_value - the value of pixels beyond the border of the image.\n This should test as True or False.\n \n The pixels are numbered like this:\n \n 0 1 2\n 3 4 5\n 6 7 8\n The index at a pixel is the sum of 2** for pixels\n that evaluate to true. 
\n '''\n #\n # Test for a table that never transforms a zero into a one:\n #\n center_is_zero = np.array([(x & 2**4) == 0 for x in range(2**9)])\n use_index_trick = False\n if (not np.any(table[center_is_zero]) and\n (np.issubdtype(image.dtype, bool) or np.issubdtype(image.dtype, int))):\n # Use the index trick\n use_index_trick = True\n invert = False\n elif (np.all(table[~center_is_zero]) and np.issubdtype(image.dtype, bool)):\n # All ones stay ones, invert the table and the image and do the trick\n use_index_trick = True\n invert = True\n image = ~ image\n # table index 0 -> 511 and the output is reversed\n table = ~ table[511-np.arange(512)]\n border_value = not border_value\n if use_index_trick:\n orig_image = image\n index_i, index_j, image = prepare_for_index_lookup(image, border_value)\n index_i, index_j = index_lookup(index_i, index_j, \n image, table, iterations)\n image = extract_from_image_lookup(orig_image, index_i, index_j)\n if invert:\n image = ~ image\n return image\n \n counter = 0\n while counter != iterations:\n counter += 1\n #\n # We accumulate into the indexer to get the index into the table\n # at each point in the image\n #\n if image.shape[0] < 3 or image.shape[1] < 3:\n image = image.astype(bool)\n indexer = np.zeros(image.shape,int)\n indexer[1:,1:] += image[:-1,:-1] * 2**0\n indexer[1:,:] += image[:-1,:] * 2**1\n indexer[1:,:-1] += image[:-1,1:] * 2**2\n \n indexer[:,1:] += image[:,:-1] * 2**3\n indexer[:,:] += image[:,:] * 2**4\n indexer[:,:-1] += image[:,1:] * 2**5\n \n indexer[:-1,1:] += image[1:,:-1] * 2**6\n indexer[:-1,:] += image[1:,:] * 2**7\n indexer[:-1,:-1] += image[1:,1:] * 2**8\n else:\n indexer = table_lookup_index(np.ascontiguousarray(image,np.uint8))\n if border_value:\n indexer[0,:] |= 2**0 + 2**1 + 2**2\n indexer[-1,:] |= 2**6 + 2**7 + 2**8\n indexer[:,0] |= 2**0 + 2**3 + 2**6\n indexer[:,-1] |= 2**2 + 2**5 + 2**8\n new_image = table[indexer]\n if np.all(new_image == image):\n break\n image = new_image\n return image", 
"response": "Perform a morphological transform on an image and a 512 - element table giving the transform of each pixel and its 8 - connected neighbors."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef pattern_of(index):\n '''Return the pattern represented by an index value'''\n return np.array([[index & 2**0,index & 2**1,index & 2**2],\n [index & 2**3,index & 2**4,index & 2**5],\n [index & 2**6,index & 2**7,index & 2**8]], bool)", "response": "Return the pattern represented by an index value"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef index_of(pattern):\n '''Return the index of a given pattern'''\n return (pattern[0,0] * 2**0 + pattern[0,1] * 2**1 + pattern[0,2] * 2**2 +\n pattern[1,0] * 2**3 + pattern[1,1] * 2**4 + pattern[1,2] * 2**5 +\n pattern[2,0] * 2**6 + pattern[2,1] * 2**7 + pattern[2,2] * 2**8)", "response": "Return the index of a given pattern"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nremove all pixels from an image except for branchpoints", "response": "def branchpoints(image, mask=None):\n '''Remove all pixels from an image except for branchpoints\n \n image - a skeletonized image\n mask - a mask of pixels excluded from consideration\n \n 1 0 1 ? 0 ?\n 0 1 0 -> 0 1 0\n 0 1 0 0 ? 
0\n '''\n global branchpoints_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, branchpoints_table, False, 1)\n if not mask is None:\n result[~mask] = image[~mask]\n return result"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef branchings(image, mask=None):\n '''Count the number of branches eminating from each pixel\n \n image - a binary image\n mask - optional mask of pixels not to consider\n\n This is the count of the number of branches that\n eminate from a pixel. A pixel with neighbors fore\n and aft has branches fore and aft = 2. An endpoint\n has one branch. A fork has 3. Finally, there's\n the quadrabranch which has 4:\n 1 0 1\n 0 1 0 -> 4\n 1 0 1\n '''\n global branchings_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n #\n # Not a binary operation, so we do a convolution with the following\n # kernel to get the indices into the table.\n #\n kernel = np.array([[1,2,4],\n [8,16,32],\n [64,128,256]])\n indexer = scind.convolve(masked_image.astype(int), kernel,\n mode='constant').astype(int)\n result = branchings_table[indexer]\n return result", "response": "Count the number of branches eminating from each pixel in the image."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef bridge(image, mask=None, iterations = 1):\n '''Fill in pixels that bridge gaps.\n \n 1 0 0 1 0 0\n 0 0 0 -> 0 1 0\n 0 0 1 0 0 1\n '''\n global bridge_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, bridge_table, False, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Fill in pixels that bridge gaps."} 
{"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef clean(image, mask=None, iterations = 1):\n '''Remove isolated pixels\n \n 0 0 0 0 0 0\n 0 1 0 -> 0 0 0\n 0 0 0 0 0 0\n \n Border pixels and pixels adjoining masks are removed unless one valid\n neighbor is true.\n '''\n global clean_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, clean_table, False, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Remove isolated pixels from an image and remove any pixels adjoining masks."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef diag(image, mask=None, iterations=1):\n '''4-connect pixels that are 8-connected\n \n 0 0 0 0 0 ?\n 0 0 1 -> 0 1 1\n 0 1 0 ? 1 ?\n \n '''\n global diag_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, diag_table, False, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "4-connect pixels that are 8-connected\n \n 0 0 0 0 0 ?\n 0 0 1 -> 0 1 1\n 0 1 0 ? 1 ?"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef endpoints(image, mask=None):\n '''Remove all pixels from an image except for endpoints\n \n image - a skeletonized image\n mask - a mask of pixels excluded from consideration\n \n 1 0 0 ? 
0 0\n 0 1 0 -> 0 1 0\n 0 0 0 0 0 0\n '''\n global endpoints_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, endpoints_table, False, 1)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Remove all pixels from an image except for endpoints"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nfill isolated black pixels with the specified mask.", "response": "def fill(image, mask=None, iterations=1):\n '''Fill isolated black pixels\n \n 1 1 1 1 1 1\n 1 0 1 -> 1 1 1\n 1 1 1 1 1 1\n '''\n global fill_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = True\n result = table_lookup(masked_image, fill_table, True, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nfill 4 - connected black pixels with mask", "response": "def fill4(image, mask=None, iterations=1):\n '''Fill 4-connected black pixels\n \n x 1 x x 1 x\n 1 0 1 -> 1 1 1\n x 1 x x 1 x\n '''\n global fill4_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = True\n result = table_lookup(masked_image, fill4_table, True, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nremoving horizontal breaks from an image", "response": "def hbreak(image, mask=None, iterations=1):\n '''Remove horizontal breaks\n \n 1 1 1 1 1 1\n 0 1 0 -> 0 0 0 (this case only)\n 1 1 1 1 1 1\n '''\n global hbreak_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, hbreak_table, False)\n if not mask is 
None:\n result[~mask] = image[~mask]\n return result"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef vbreak(image, mask=None, iterations=1):\n '''Remove horizontal breaks\n \n 1 1 1 1 1 1\n 0 1 0 -> 0 0 0 (this case only)\n 1 1 1 1 1 1\n '''\n global vbreak_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, vbreak_table, False)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Remove horizontal breaks from an image"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef majority(image, mask=None, iterations=1):\n '''A pixel takes the value of the majority of its neighbors\n \n '''\n global majority_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, majority_table, False, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "A pixel takes the value of the majority of its neighbors\n \n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef remove(image, mask=None, iterations=1):\n '''Turn 1 pixels to 0 if their 4-connected neighbors are all 0\n \n ? 1 ? ? 1 ?\n 1 1 1 -> 1 0 1\n ? 1 ? ? 
1 ?\n '''\n global remove_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, remove_table, False)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Remove the image from the internal cache."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef spur(image, mask=None, iterations=1):\n '''Remove spur pixels from an image\n \n 0 0 0 0 0 0\n 0 1 0 -> 0 0 0\n 0 0 1 0 0 ?\n '''\n global spur_table_1,spur_table_2\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n index_i, index_j, masked_image = prepare_for_index_lookup(masked_image, \n False)\n if iterations is None:\n iterations = len(index_i)\n for i in range(iterations):\n for table in (spur_table_1, spur_table_2):\n index_i, index_j = index_lookup(index_i, index_j, \n masked_image, table, 1)\n masked_image = extract_from_image_lookup(image, index_i, index_j)\n if not mask is None:\n masked_image[~mask] = image[~mask]\n return masked_image", "response": "Remove spur pixels from an image"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef thicken(image, mask=None, iterations=1):\n '''Thicken the objects in an image where doing so does not connect them\n \n 0 0 0 ? ? ?\n 0 0 0 -> ? 1 ?\n 0 0 1 ? ? ?\n \n 1 0 0 ? ? ?\n 0 0 0 -> ? 0 ?\n 0 0 1 ? ? 
?\n '''\n global thicken_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, thicken_table, False, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Thicken the objects in an image where doing so does not connect them"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nthin an image to lines preserving Euler number and thinning with Two Subiteration Algorithms", "response": "def thin(image, mask=None, iterations=1):\n '''Thin an image to lines, preserving Euler number\n \n Implements thinning as described in algorithm # 1 from\n Guo, \"Parallel Thinning with Two Subiteration Algorithms\",\n Communications of the ACM, Vol 32 #3 page 359.\n '''\n global thin_table, eight_connect\n if thin_table is None:\n thin_table = np.zeros((2,512),bool)\n for i in range(512):\n if (i & 16) == 0:\n # All zeros -> 0\n continue\n pat = pattern_of(i & ~ 16)\n ipat = pat.astype(int)\n if scind.label(pat, eight_connect)[1] != 1:\n thin_table[:,i] = True\n continue\n n1 = ((ipat[0,0] or ipat[0,1]) + (ipat[0,2] or ipat[1,2])+\n (ipat[2,2] or ipat[2,1]) + (ipat[2,0] or ipat[1,0]))\n n2 = ((ipat[0,1] or ipat[0,2]) + (ipat[1,2] or ipat[2,2])+\n (ipat[2,1] or ipat[2,0]) + (ipat[1,0] or ipat[0,0]))\n if min(n1,n2) not in (2,3):\n thin_table[:,i] = True\n continue\n thin_table[0,i] = ((pat[0,1] or pat[0,2] or not pat[2,2]) and \n pat[1,2])\n thin_table[1,i] = ((pat[2,1] or pat[2,0] or not pat[0,0]) and\n pat[1,0])\n if mask is None:\n masked_image = image.copy()\n else:\n masked_image = image.copy()\n masked_image[~mask] = False\n index_i, index_j, masked_image = prepare_for_index_lookup(masked_image, False)\n if iterations is None:\n iterations = len(index_i)\n for i in range(iterations):\n hit_count = len(index_i)\n for j in range(2):\n index_i, index_j, = index_lookup(index_i, index_j, \n masked_image,\n 
thin_table[j], 1)\n if hit_count == len(index_i):\n break\n masked_image = extract_from_image_lookup(image, index_i, index_j)\n if not mask is None:\n masked_image[~mask] = masked_image[~mask]\n return masked_image"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncolors a labels matrix so that no adjacent labels have the same color.", "response": "def color_labels(labels, distance_transform = False):\n '''Color a labels matrix so that no adjacent labels have the same color\n \n distance_transform - if true, distance transform the labels to find out\n which objects are closest to each other.\n \n Create a label coloring matrix which assigns a color (1-n) to each pixel\n in the labels matrix such that all pixels similarly labeled are similarly\n colored and so that no similiarly colored, 8-connected pixels have\n different labels.\n \n You can use this function to partition the labels matrix into groups\n of objects that are not touching; you can then operate on masks\n and be assured that the pixels from one object won't interfere with\n pixels in another.\n \n returns the color matrix\n '''\n if distance_transform:\n i,j = scind.distance_transform_edt(labels == 0, return_distances=False,\n return_indices = True)\n dt_labels = labels[i,j]\n else:\n dt_labels = labels\n # Get the neighbors for each object\n v_count, v_index, v_neighbor = find_neighbors(dt_labels)\n # Quickly get rid of labels with no neighbors. 
Greedily assign\n # all of these a color of 1\n v_color = np.zeros(len(v_count)+1,int) # the color per object - zero is uncolored\n zero_count = (v_count==0)\n if np.all(zero_count):\n # can assign all objects the same color\n return (labels!=0).astype(int)\n v_color[1:][zero_count] = 1\n v_count = v_count[~zero_count]\n v_index = v_index[~zero_count]\n v_label = np.argwhere(~zero_count).transpose()[0]+1\n # If you process the most connected labels first and use a greedy\n # algorithm to preferentially assign a label to an existing color,\n # you'll get a coloring that uses 1+max(connections) at most.\n #\n # Welsh, \"An upper bound for the chromatic number of a graph and\n # its application to timetabling problems\", The Computer Journal, 10(1)\n # p 85 (1967)\n #\n sort_order = np.lexsort([-v_count])\n v_count = v_count[sort_order]\n v_index = v_index[sort_order]\n v_label = v_label[sort_order]\n for i in range(len(v_count)):\n neighbors = v_neighbor[v_index[i]:v_index[i]+v_count[i]]\n colors = np.unique(v_color[neighbors])\n if colors[0] == 0:\n if len(colors) == 1:\n # only one color and it's zero. 
All neighbors are unlabeled\n v_color[v_label[i]] = 1\n continue\n else:\n colors = colors[1:]\n # The colors of neighbors will be ordered, so there are two cases:\n # * all colors up to X appear - colors == np.arange(1,len(colors)+1)\n # * some color is missing - the color after the first missing will\n # be mislabeled: colors[i] != np.arange(1, len(colors)+1)\n crange = np.arange(1,len(colors)+1)\n misses = crange[colors != crange]\n if len(misses):\n color = misses[0]\n else:\n color = len(colors)+1\n v_color[v_label[i]] = color\n return v_color[labels]"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef skeletonize(image, mask=None, ordering = None):\n '''Skeletonize the image\n \n Take the distance transform.\n Order the 1 points by the distance transform.\n Remove a point if it has more than 1 neighbor and if removing it\n does not change the Euler number.\n \n image - the binary image to be skeletonized\n \n mask - only skeletonize pixels within the mask\n \n ordering - a matrix of the same dimensions as the image. The matrix\n provides the ordering of the erosion with the lowest values\n being eroded first. 
The default is to use the distance transform.\n '''\n global eight_connect\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n #\n # Lookup table - start with only positive pixels.\n # Keep if # pixels in neighborhood is 2 or less\n # Keep if removing the pixel results in a different connectivity\n #\n table = (make_table(True,np.array([[0,0,0],[0,1,0],[0,0,0]],bool),\n np.array([[0,0,0],[0,1,0],[0,0,0]],bool)) &\n (np.array([scind.label(pattern_of(index), eight_connect)[1] !=\n scind.label(pattern_of(index & ~ 2**4),\n eight_connect)[1]\n for index in range(512) ]) |\n np.array([np.sum(pattern_of(index))<3 for index in range(512)])))\n \n if ordering is None:\n distance = scind.distance_transform_edt(masked_image)\n else:\n distance = ordering\n #\n # The processing order along the edge is critical to the shape of the\n # resulting skeleton: if you process a corner first, that corner will\n # be eroded and the skeleton will miss the arm from that corner. Pixels\n # with fewer neighbors are more \"cornery\" and should be processed last.\n #\n cornerness_table = np.array([9-np.sum(pattern_of(index))\n for index in range(512)])\n corner_score = table_lookup(masked_image, cornerness_table, False,1)\n i,j = np.mgrid[0:image.shape[0],0:image.shape[1]]\n result=masked_image.copy()\n distance = distance[result]\n i = np.ascontiguousarray(i[result],np.int32)\n j = np.ascontiguousarray(j[result],np.int32)\n result=np.ascontiguousarray(result,np.uint8)\n #\n # We use a random # for tiebreaking. 
Assign each pixel in the image a\n # predictable, random # so that masking doesn't affect arbitrary choices\n # of skeletons\n #\n np.random.seed(0)\n tiebreaker=np.random.permutation(np.arange(np.product(masked_image.shape)))\n tiebreaker.shape=masked_image.shape\n order = np.lexsort((tiebreaker[masked_image],\n corner_score[masked_image],\n distance))\n order = np.ascontiguousarray(order, np.int32)\n table = np.ascontiguousarray(table, np.uint8)\n skeletonize_loop(result, i, j, order, table)\n \n result = result.astype(bool)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", "response": "Skeletonize the image into a single skeleton."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nlabels a skeleton so that each edge has a unique label.", "response": "def label_skeleton(skeleton):\n '''Label a skeleton so that each edge has a unique label\n \n This operation produces a labels matrix where each edge between\n two branchpoints has a different label. 
If the skeleton has been\n properly eroded, there are three kinds of points:\n 1) point adjacent to 0 or 1 other points = end of edge\n 2) point adjacent to two other points = in middle of edge\n 3) point adjacent to more than two other points = at end of edge\n connecting to another edge\n 4) a branchpoint\n \n We do all connected components here where components are 8-connected\n but a point in category 3 can't connect to another point in category 3.\n \n Returns the labels matrix and the count as a tuple\n '''\n bpts = branchpoints(skeleton)\n #\n # Count the # of neighbors per point\n #\n neighbors = scind.convolve(skeleton.astype(int), np.ones((3,3),int),\n mode='constant').astype(int)\n neighbors[~skeleton] = 0\n neighbors[skeleton] -= 1\n #\n # Find the i/j coordinates of the relevant points\n #\n i,j = np.mgrid[0:skeleton.shape[0], 0:skeleton.shape[1]]\n skeleton_minus_bpts = skeleton & ~ bpts\n si = i[skeleton_minus_bpts]\n sj = j[skeleton_minus_bpts]\n bi = i[bpts]\n bj = j[bpts]\n i = np.hstack((bi, si))\n j = np.hstack((bj, sj))\n b_vnum = np.arange(len(bi))\n s_vnum = np.arange(len(si)) + len(bi)\n all_vnum = np.hstack((b_vnum, s_vnum))\n vertex_numbers=np.zeros(skeleton.shape, int)\n vertex_numbers[i,j] = all_vnum\n #\n # src and dest are the vertices linked by edges. Their values are the\n # vertex numbers. 
First, link every vertex to itself\n #\n src = all_vnum\n dest = all_vnum\n #\n # Now, for the non-branchpoints, link to all 8-connected neighbors\n # while obeying the rules\n #\n for ioff, joff in ((-1,-1), (-1,0), (-1,1),\n ( 0,-1), ( 0,1),\n ( 1,-1), ( 1,0), ( 1,1)):\n consider = np.ones(len(si), bool)\n if ioff == -1:\n consider = si > 0\n elif ioff == 1:\n consider = si < skeleton.shape[0] - 1\n if joff == -1:\n consider = consider & (sj > 0)\n elif joff == 1:\n consider = consider & (sj < skeleton.shape[1] - 1)\n #\n # Forge a link if the offset point is in the skeleton\n #\n ci = si[consider]\n cj = sj[consider]\n link = (skeleton_minus_bpts[ci+ioff, cj+joff] &\n ((neighbors[ci,cj] < 3) | (neighbors[ci+ioff, cj+joff] < 3)))\n ci = ci[link]\n cj = cj[link]\n src = np.hstack((src, vertex_numbers[ci, cj]))\n dest = np.hstack((dest, vertex_numbers[ci+ioff, cj+joff]))\n \n labeling = all_connected_components(src, dest)\n vertex_numbers[i,j] = labeling + 1\n return (vertex_numbers, \n 0 if len(labeling) == 0 else int(np.max(labeling)) + 1)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncomputes the length of all skeleton branches for labeled skeletons.", "response": "def skeleton_length(labels, indices=None):\n '''Compute the length of all skeleton branches for labeled skeletons\n \n labels - a labels matrix\n indices - the indexes of the labels to be measured. 
Default is all\n \n returns an array of one skeleton length per label.\n '''\n global __skel_length_table\n if __skel_length_table is None:\n tbl = np.zeros(512, np.float32)\n for ii in range(-1, 2):\n for jj in range(-1, 2):\n if ii == 0 and jj == 0:\n continue\n #\n # Set the bit to search for and the center bit\n #\n idx = 2 ** (ii + 1 + (jj + 1) * 3) | 16\n mask = (np.arange(512) & idx) == idx\n #\n # If we are four-connected to another pixel that is\n # connected to this one, they are 8-connected and that\n # is the distance.\n #\n # bad good\n # x 1 0 0 0 0\n # x 1 1 0 1 0\n # x x x 0 x 1\n if ii != 0 and jj != 0:\n for adjacent_i, adjacent_j in (\n (ii-1, jj), (ii, jj-1), (ii+1, jj), (ii, jj+1)):\n if any([_ < -1 or _ > 1 \n for _ in (adjacent_i, adjacent_j)]):\n continue\n aidx = 2 ** (adjacent_i+1 + (adjacent_j + 1) * 3)\n mask = mask & ((np.arange(512) & aidx) != aidx)\n tbl[mask] += np.sqrt(ii*ii + jj*jj) / 2\n __skel_length_table = tbl\n if indices is None:\n indices = np.arange(1, np.max(labels)+1)\n else:\n indices = np.asanyarray(indices)\n if len(indices) == 0:\n return np.zeros(0)\n score = __skel_length_table[table_idx_from_labels(labels)]\n \n result = np.bincount(labels.ravel(), \n weights = score.ravel(),\n minlength=np.max(indices)+1)\n return result[indices]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef distance_to_edge(labels):\n '''Compute the distance of a pixel to the edge of its object\n \n labels - a labels matrix\n \n returns a matrix of distances\n '''\n colors = color_labels(labels)\n max_color = np.max(colors)\n result = np.zeros(labels.shape)\n if max_color == 0:\n return result\n \n for i in range(1, max_color+1):\n mask = (colors==i)\n result[mask] = scind.distance_transform_edt(mask)[mask]\n return result", "response": "Compute the distance of a pixel to the edge of its object\n \n returns a matrix of distances"} {"SOURCE": "codesearchnet", 
"instruction": "Can you generate the documentation for the following Python 3 function\ndef regional_maximum(image, mask = None, structure=None, ties_are_ok=False):\n '''Return a binary mask containing only points that are regional maxima\n \n image - image to be transformed\n mask - mask of relevant pixels\n structure - binary structure giving the neighborhood and connectivity\n in which to search for maxima. Default is 8-connected.\n ties_are_ok - if this is true, then adjacent points of the same magnitude\n are rated as maxima. \n \n Find locations for which all neighbors as defined by the structure have\n lower values. The algorithm selects only one of a set of adjacent locations\n with identical values, first using a distance transform to find the\n innermost location, then, among equals, selected randomly.\n \n A location cannot be a local maximum if it is touching the edge or a\n masked pixel.\n '''\n global eight_connect\n if not ties_are_ok:\n #\n # Get an an initial pass with the ties.\n #\n result = regional_maximum(image, mask, structure, True)\n if not np.any(result):\n return result\n distance = scind.distance_transform_edt(result)\n #\n # Rank-order the distances and then add a randomizing factor\n # to break ties for pixels equidistant from the background.\n # Pick the best value within a contiguous region\n #\n labels, label_count = scind.label(result, eight_connect)\n np.random.seed(0)\n ro_distance = rank_order(distance)[0].astype(float)\n count = np.product(ro_distance.shape)\n ro_distance.flat += (np.random.permutation(count).astype(float) / \n float(count))\n positions = scind.maximum_position(ro_distance, labels,\n np.arange(label_count)+1)\n positions = np.array(positions, np.uint32)\n result = np.zeros(image.shape, bool)\n if positions.ndim == 1:\n result[positions[0],positions[1]] = True\n else:\n result[positions[:,0],positions[:,1]] = True\n return result\n result = np.ones(image.shape,bool)\n if structure is None:\n structure = 
scind.generate_binary_structure(image.ndim, image.ndim)\n #\n # The edges of the image are losers because they are touching undefined\n # points. Construct a big mask that represents the edges.\n #\n big_mask = np.zeros(np.array(image.shape) + np.array(structure.shape), bool)\n structure_half_shape = np.array(structure.shape)//2\n big_mask[structure_half_shape[0]:structure_half_shape[0]+image.shape[0],\n structure_half_shape[1]:structure_half_shape[1]+image.shape[1]]=\\\n mask if mask is not None else True\n for i in range(structure.shape[0]):\n off_i = i-structure_half_shape[0]\n for j in range(structure.shape[1]):\n if i == structure_half_shape[0] and j == structure_half_shape[1]:\n continue\n off_j = j-structure_half_shape[1]\n if structure[i,j]:\n result = np.logical_and(result, big_mask[i:i+image.shape[0],\n j:j+image.shape[1]])\n #\n # Get the boundaries of the source image and the offset\n # image so we can make similarly shaped, but offset slices\n #\n src_i_min = max(0,-off_i)\n src_i_max = min(image.shape[0], image.shape[0]-off_i)\n off_i_min = max(0,off_i)\n off_i_max = min(image.shape[0], image.shape[0]+off_i)\n src_j_min = max(0,-off_j)\n src_j_max = min(image.shape[1], image.shape[1]-off_j)\n off_j_min = max(0,off_j)\n off_j_max = min(image.shape[1], image.shape[1]+off_j)\n min_mask = (image[src_i_min:src_i_max,\n src_j_min:src_j_max] <\n image[off_i_min:off_i_max,\n off_j_min:off_j_max])\n result[src_i_min:src_i_max,\n src_j_min:src_j_max][min_mask] = False\n return result", "response": "Return a binary mask containing only points that are regional maxima."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nassociating each label in i with a component # This function finds all connected components given an array of associations between labels i and j using a depth-first search. i & j give the edges of the graph. 
The first step of the algorithm makes bidirectional edges, (i->j and j<-i), so it's best to only send the edges in one direction (although the algorithm can withstand duplicates). returns a label for each vertex up to the maximum named vertex in i.", "response": "def all_connected_components(i,j):\n '''Associate each label in i with a component #\n \n This function finds all connected components given an array of\n associations between labels i and j using a depth-first search.\n \n i & j give the edges of the graph. The first step of the algorithm makes\n bidirectional edges, (i->j and j<-i), so it's best to only send the\n edges in one direction (although the algorithm can withstand duplicates).\n \n returns a label for each vertex up to the maximum named vertex in i.\n '''\n if len(i) == 0:\n return i\n i1 = np.hstack((i,j))\n j1 = np.hstack((j,i))\n order = np.lexsort((j1,i1))\n i=np.ascontiguousarray(i1[order],np.uint32)\n j=np.ascontiguousarray(j1[order],np.uint32)\n #\n # Get indexes and counts of edges per vertex\n #\n counts = np.ascontiguousarray(np.bincount(i.astype(int)),np.uint32)\n indexes = np.ascontiguousarray(np.cumsum(counts)-counts,np.uint32)\n #\n # This stores the lowest index # during the algorithm - the first\n # vertex to be labeled in a connected component.\n #\n labels = np.zeros(len(counts), np.uint32)\n _all_connected_components(i,j,indexes,counts,labels)\n return labels"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn all permutations of a set of groups This routine takes two vectors: i - the label of each group j - the members of the group. 
For instance, take a set of two groups with several members each: i | j ------ 1 | 1 1 | 2 1 | 3 2 | 1 2 | 4 2 | 5 2 | 6 The output will be i | j1 | j2 ----------- 1 | 1 | 2 1 | 1 | 3 1 | 2 | 3 2 | 1 | 4 2 | 1 | 5 2 | 1 | 6 2 | 4 | 5 2 | 4 | 6 2 | 5 | 6 etc", "response": "def pairwise_permutations(i, j):\n '''Return all permutations of a set of groups\n \n This routine takes two vectors:\n i - the label of each group\n j - the members of the group.\n \n For instance, take a set of two groups with several members each:\n \n i | j\n ------\n 1 | 1\n 1 | 2\n 1 | 3\n 2 | 1\n 2 | 4\n 2 | 5\n 2 | 6\n \n The output will be\n i | j1 | j2\n -----------\n 1 | 1 | 2\n 1 | 1 | 3\n 1 | 2 | 3\n 2 | 1 | 4\n 2 | 1 | 5\n 2 | 1 | 6\n 2 | 4 | 5\n 2 | 4 | 6\n 2 | 5 | 6\n etc\n '''\n if len(i) == 0:\n return (np.array([], int), np.array([], j.dtype), np.array([], j.dtype))\n #\n # Sort by i then j\n #\n index = np.lexsort((j,i))\n i=i[index]\n j=j[index]\n #\n # map the values of i to a range r\n #\n r_to_i = np.sort(np.unique(i))\n i_to_r_off = np.min(i)\n i_to_r = np.zeros(np.max(i)+1-i_to_r_off, int)\n i_to_r[r_to_i - i_to_r_off] = np.arange(len(r_to_i))\n #\n # Advance the value of r by one each time i changes\n #\n r = np.cumsum(np.hstack(([False], i[:-1] != i[1:])))\n #\n # find the counts per item\n #\n src_count = np.bincount(r)\n #\n # The addresses of the starts of each item\n #\n src_idx = np.hstack(([0], np.cumsum(src_count[:-1])))\n #\n # The sizes of each destination item: n * (n - 1) / 2\n # This is the number of permutations of n items against themselves.\n #\n dest_count = src_count * (src_count - 1) // 2\n #\n # The indexes of the starts of each destination item (+ total sum at end)\n #\n dest_idx = np.hstack(([0], np.cumsum(dest_count)))\n dest_size = dest_idx[-1]\n #\n # Allocate the destination arrays\n #\n d_r = np.zeros(dest_size, i.dtype)\n d_j1, d_j2 = np.zeros((2, dest_size), j.dtype)\n #\n # Mark the first item in the destination and then do a cumulative\n # 
sum trick ( (1 + 0 + 0 + 0 + 1 + 0 + 0) - 1 = \n # 0 , 0 , 0 , 0, 1, 1 , 1)\n # to label each member of d_i\n #\n not_empty_indices = np.arange(0, len(dest_idx)-1)\n not_empty_indices = not_empty_indices[dest_idx[:-1] != dest_idx[1:]]\n increments = not_empty_indices - np.hstack([[0], not_empty_indices[:-1]])\n d_r[dest_idx[not_empty_indices]] = increments\n d_r = np.cumsum(d_r)\n d_i = r_to_i[d_r]\n #\n # Index each member of the destination array relative to its start. The\n # above array would look like this: [0, 1, 2, 3, 0, 1, 2]\n #\n d_r_idx = np.arange(len(d_r)) - dest_idx[d_r]\n #\n # We can use a 2x2 matrix to look up which j1 and j2 for each of\n # the d_r_idx. The first slot in the matrix is the number of values\n # to permute (from src_count[d_r]) and the second slot is d_r_idx\n # So here, we make a sparse array for the unique values of src_count\n #\n unique_src_count = np.unique(src_count)\n unique_dest_len = (unique_src_count * (unique_src_count - 1) // 2).astype(int)\n #\n # src_count repeated once per permutation\n #\n i_sparse = np.hstack([np.ones(dlen, int) * c \n for c, dlen in zip(unique_src_count,\n unique_dest_len)])\n #\n # The indexes from zero to the # of permutations\n #\n j_sparse = np.hstack([np.arange(dlen) for dlen in unique_dest_len])\n #\n # Repeat 0 n-1 times, 1 n-2 times, etc to get the first indexes in\n # the permutation.\n #\n v_j1_sparse = np.hstack(\n [ np.hstack(\n [np.ones(n-x-1, int) * x for x in range(n)]) \n for n in unique_src_count])\n #\n # Spit out a range from 1 to n-1, 2 to n-1, etc\n #\n v_j2_sparse = np.hstack(\n [ np.hstack(\n [np.arange(x+1, n) for x in range(n)])\n for n in unique_src_count])\n \n if len(i_sparse) > 0:\n j1_sparse = scipy.sparse.coo_matrix((v_j1_sparse, \n (i_sparse, j_sparse))).tocsc()\n j2_sparse = scipy.sparse.coo_matrix((v_j2_sparse, \n (i_sparse, j_sparse))).tocsc()\n else:\n j1_sparse = j2_sparse = np.array([[]], j.dtype)\n #\n # And now we can spit out the j1 and j2 dest. 
This is whatever element\n # from the group in j indexed by either j1 or j2 sparse. We find the\n # group's start by using d_r to look up the group's start.\n #\n d_j1_idx = np.array(j1_sparse[src_count[d_r], d_r_idx]).flatten()\n d_j1 = j[src_idx[d_r] + d_j1_idx]\n d_j2_idx = np.array(j2_sparse[src_count[d_r], d_r_idx]).flatten()\n d_j2 = j[src_idx[d_r] + d_j2_idx]\n \n return (d_i, d_j1, d_j2)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef is_local_maximum(image, labels, footprint):\n '''Return a boolean array of points that are local maxima\n \n image - intensity image\n labels - find maxima only within labels. Zero is reserved for background.\n footprint - binary mask indicating the neighborhood to be examined\n must be a matrix with odd dimensions, center is taken to\n be the point in question.\n '''\n assert((np.all(footprint.shape) & 1) == 1)\n footprint = (footprint != 0)\n footprint_extent = (np.array(footprint.shape)-1) // 2\n if np.all(footprint_extent == 0):\n return labels > 0\n result = (labels > 0).copy()\n #\n # Create a labels matrix with zeros at the borders that might be\n # hit by the footprint.\n #\n big_labels = np.zeros(np.array(labels.shape) + footprint_extent*2,\n labels.dtype)\n big_labels[[slice(fe,-fe) for fe in footprint_extent]] = labels\n #\n # Find the relative indexes of each footprint element\n #\n image_strides = np.array(image.strides) // image.dtype.itemsize\n big_strides = np.array(big_labels.strides) // big_labels.dtype.itemsize\n result_strides = np.array(result.strides) // result.dtype.itemsize\n footprint_offsets = np.mgrid[[slice(-fe,fe+1) for fe in footprint_extent]]\n footprint_offsets = footprint_offsets[:, footprint]\n #\n # Order by distance, low to high and get rid of center pt.\n #\n d = np.sum(footprint_offsets **2, 0)\n footprint_offsets, d = footprint_offsets[:, d > 0], d[d > 0]\n footprint_offsets = footprint_offsets[:, np.lexsort([d])]\n \n 
fp_image_offsets = np.sum(image_strides[:, np.newaxis] *\n footprint_offsets, 0)\n fp_big_offsets = np.sum(big_strides[:, np.newaxis] *\n footprint_offsets, 0)\n #\n # Get the index of each labeled pixel in the image and big_labels arrays\n #\n indexes = np.mgrid[[slice(0,x) for x in labels.shape]][:, labels > 0]\n image_indexes = np.sum(image_strides[:, np.newaxis] * indexes, 0)\n big_indexes = np.sum(big_strides[:, np.newaxis] * \n (indexes + footprint_extent[:, np.newaxis]), 0)\n result_indexes = np.sum(result_strides[:, np.newaxis] * indexes, 0)\n #\n # Now operate on the raveled images\n #\n big_labels_raveled = big_labels.ravel()\n image_raveled = image.ravel()\n result_raveled = result.ravel()\n #\n # A hit is a hit if the label at the offset matches the label at the pixel\n # and if the intensity at the pixel is greater or equal to the intensity\n # at the offset.\n #\n for fp_image_offset, fp_big_offset in zip(fp_image_offsets, fp_big_offsets):\n same_label = (big_labels_raveled[big_indexes + fp_big_offset] ==\n big_labels_raveled[big_indexes])\n less_than = (image_raveled[image_indexes[same_label]] <\n image_raveled[image_indexes[same_label]+ fp_image_offset])\n mask = ~same_label\n mask[same_label] = ~less_than\n result_raveled[result_indexes[~mask]] = False\n result_indexes = result_indexes[mask]\n big_indexes = big_indexes[mask]\n image_indexes = image_indexes[mask]\n \n return result", "response": "Return a boolean array of points that are local maximum within image."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef angular_distribution(labels, resolution=100, weights=None):\n '''For each object in labels, compute the angular distribution\n around the centers of mass. 
Returns an i x j matrix, where i is\n the number of objects in the label matrix, and j is the resolution\n of the distribution (default 100), mapped from -pi to pi.\n\n Optionally, the distributions can be weighted by pixel.\n\n The algorithm approximates the angular width of pixels relative to\n the object centers, in an attempt to be accurate for small\n objects.\n\n The ChordRatio of an object can be approximated by \n >>> angdist = angular_distribution(labels, resolution)\n >>> angdist2 = angdist[:, :resolution//2] + angdist[:, resolution//2] # map to widths, rather than radii\n >>> chord_ratio = np.sqrt(angdist2.max(axis=1) / angdist2.min(axis=1)) # sqrt because these are sectors, not triangles\n '''\n if weights is None:\n weights = np.ones(labels.shape)\n maxlabel = labels.max()\n ci, cj = centers_of_labels(labels)\n j, i = np.meshgrid(np.arange(labels.shape[0]), np.arange(labels.shape[1]))\n # compute deltas from pixels to object centroids, and mask to labels\n di = i[labels > 0] - ci[labels[labels > 0] - 1]\n dj = j[labels > 0] - cj[labels[labels > 0] - 1]\n weights = weights[labels > 0]\n labels = labels[labels > 0]\n # find angles, and angular width of pixels\n angle = np.arctan2(di, dj)\n # Use pixels of width 2 to get some smoothing\n width = np.arctan(1.0 / np.sqrt(di**2 + dj**2 + np.finfo(float).eps))\n # create an onset/offset array of size 3 * resolution\n lo = np.clip((angle - width) * resolution / (2 * np.pi), -resolution, 2 * resolution).astype(int) + resolution\n hi = np.clip((angle + width) * resolution / (2 * np.pi), -resolution, 2 * resolution).astype(int) + resolution\n # make sure every pixel counts at least once\n hi[lo == hi] += 1\n # normalize weights by their angular width (adding a bit to avoid 0 / 0)\n weights /= (hi - lo)\n onset = scipy.sparse.coo_matrix((weights, (labels - 1, lo)), (maxlabel, 3 * resolution)).toarray()\n offset = scipy.sparse.coo_matrix((weights, (labels - 1, hi)), (maxlabel, 3 * resolution)).toarray()\n # sum 
onset/offset to get actual distribution\n onoff = onset - offset\n dist = np.cumsum(onoff, axis=1)\n dist = dist[:, :resolution] + dist[:, resolution:2*resolution] + dist[:, 2*resolution:]\n return dist", "response": "Compute the angular distribution of objects in a set of labels."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef feret_diameter(chulls, counts, indexes):\n '''Return the minimum and maximum Feret diameter for each object\n \n This function takes the convex hull data, as generated by convex_hull\n and returns the minimum and maximum Feret diameter for each convex hull.\n \n chulls - an n x 3 matrix giving the label #, the i coordinate and the\n j coordinate for each vertex in the convex hull\n counts - the number of points in each convex hull\n \n returns two arrays, the minimum and maximum diameter per object.\n '''\n #\n # Method taken from Alsuwaiyel, \"Algorithms, Design Techniques and\n # Analysis\", World Scientific, 2003\n #\n # Given a vertex, p1 on the convex hull composed of points p1 to pm,\n # there is a line, pm <-> p1. Going from p(m-1) to p2, find the\n # distance from the point to the line until the next point in the\n # series has a lesser distance. This point forms an antipodal pair\n # with p1. Now take the line from p1 to p2 and continue to add points\n # as long as the point has a greater distance from the line than its\n # successor. The final point forms an antipodal point with both p1\n # and its succesor vertex.\n #\n # Note: as far as I (leek) can figure out, there is a bug here for\n # the case of a square:\n #\n # 0 1\n #\n #\n # 3 2\n #\n # 0 - 1 is generated first, then 0 - 2 and 0 - 3\n # Then 1 - 3 is generated, missing 1 - 2. 
The author is only interested\n # in max Feret diameter as are we.\n #\n # The minimum Feret diameter always has a parallel line of support\n # that runs through two antipodal pairs and the second that either\n # runs through an antipodal pair or a vertex.\n #\n if len(counts) == 0:\n return np.zeros(0)\n save_counts = counts\n counts = np.atleast_1d(counts)\n indexes = np.atleast_1d(indexes)\n chull_idx = np.hstack(([0], np.cumsum(counts[:-1])))\n save_chull_idx = chull_idx\n chull_pts = chulls[:,1:].astype(float)\n #\n # A list of antipode arrays\n #\n antipodes = []\n #\n # Get rid of degenerate cases\n #\n # A single point is its own antipode\n #\n antipodes.append(np.column_stack([chull_idx[counts==1]] * 2))\n #\n # A line has it's two points as antipodes of each other.\n #\n antipodes.append(np.column_stack([chull_idx[counts==2],\n chull_idx[counts==2] + 1]))\n chull_idx = chull_idx[counts > 2]\n counts = counts[counts > 2]\n if len(counts) > 0:\n #\n # Calculate distances from every vertex other than pm and p1\n # to the line between pm and p1\n #\n pm_idx = (chull_idx + counts - 1)\n pm = chull_pts[pm_idx,:]\n p1 = chull_pts[chull_idx,:]\n # There are counts - 2 vertices to be examined\n vvv = Indexes([counts-2])\n v_idx = chull_idx[vvv.rev_idx] + vvv.idx[0] + 1\n distances = distance2_to_line(chull_pts[v_idx],\n pm[vvv.rev_idx],\n p1[vvv.rev_idx])\n #\n # Find the largest distance, settling ties by picking the\n # one with the largest within-object index.\n #\n order = np.lexsort((vvv.idx[0], -distances, vvv.rev_idx))\n antipode_idx = order[vvv.fwd_idx] + 1 - vvv.fwd_idx\n vertex_idx = np.zeros(len(antipode_idx), int)\n while len(chull_idx) > 0:\n #\n # Add the current antipode / vertex pair\n #\n antipodes.append(np.column_stack((chull_idx + vertex_idx, \n chull_idx + antipode_idx)))\n if len(chull_idx) == 0:\n break\n #\n # Get the distance from the line to the current antipode\n #\n dc = distance2_to_line(chull_pts[chull_idx + antipode_idx, :],\n 
chull_pts[chull_idx + vertex_idx, :],\n chull_pts[chull_idx +vertex_idx + 1, :])\n #\n # Get the distance from the line to the next antipode\n #\n next_antipode_idx = antipode_idx + 1\n next_antipode_idx[next_antipode_idx == counts] = 0\n dn = distance2_to_line(chull_pts[chull_idx + next_antipode_idx, :],\n chull_pts[chull_idx + vertex_idx, :],\n chull_pts[chull_idx + vertex_idx + 1, :])\n #\n # If the distance to the next candidate is larger, advance\n #\n advance_antipode = dc <= dn\n antipode_idx[advance_antipode] += 1\n #\n # Otherwise, move to the next vertex.\n #\n vertex_idx[~ advance_antipode] += 1\n #\n # Get rid of completed convex hulls\n #\n to_keep = (antipode_idx < counts) & (vertex_idx != antipode_idx)\n antipode_idx = antipode_idx[to_keep]\n vertex_idx = vertex_idx[to_keep]\n chull_idx = chull_idx[to_keep]\n counts = counts[to_keep]\n antipodes = np.vstack(antipodes)\n l = chulls[antipodes[:,0], 0]\n \n pt1 = chulls[antipodes[:,0],1:]\n pt2 = chulls[antipodes[:,1],1:]\n distances = np.sum((pt1 - pt2) **2, 1)\n \n max_distance = np.sqrt(fixup_scipy_ndimage_result(scind.maximum(distances, l, indexes)))\n #\n # For the minimum distance, we have to take the distance from each\n # antipode to any line between alternate successive antipodes.\n #\n counts = save_counts\n chull_idx = save_chull_idx\n indexer = Indexes(counts)\n # Get rid of degenerates\n #\n degenerates = antipodes[:,0] == antipodes[:,1]\n if np.all(degenerates):\n return np.zeros(len(indexes)), max_distance\n l = l[~degenerates]\n antipodes = antipodes[~degenerates]\n # We duplicate the list so each antipode appears as both a first and second\n #\n l = np.hstack((l,l))\n antipodes = np.vstack((antipodes, antipodes[:,::-1]))\n v2_idx = indexer.idx[0][antipodes[:,1]]\n #\n # Vertex number zero can be the vertex after the last one. 
We therefore\n # add a second vertex of \"count\" for each one where the second vertex is zero.\n #\n extras = antipodes[v2_idx == 0,:]\n antipodes = np.vstack([antipodes, extras])\n l = np.hstack((l, indexes[indexer.rev_idx[extras[:,0]]]))\n v2_idx = np.hstack((v2_idx, indexer.counts[0][indexer.rev_idx[extras[:,1]]]))\n #\n # We sort by label, first and second to order them for the search.\n #\n order = np.lexsort((v2_idx, antipodes[:,0], l))\n antipodes = antipodes[order,:]\n l = l[order]\n v2_idx = v2_idx[order]\n #\n # Now, we only want antipode pairs where:\n # * the label is the same as it's successor\n # * the first antipode is the same as it's successor\n # * the second antipode is one less than it's successor\n #\n v1_idx = indexer.idx[0][antipodes[:,0]]\n good = np.hstack(((l[1:] == l[:-1]) & \n (v1_idx[1:] == v1_idx[:-1]) &\n (v2_idx[1:] == v2_idx[:-1]+1), [False]))\n if not np.any(good):\n return np.zeros(len(indexes)), max_distance\n antipodes = antipodes[good,:]\n l = l[good]\n v2_idx = v2_idx[good]\n v = chull_pts[antipodes[:,0],:]\n l0 = chull_pts[antipodes[:,1],:]\n #\n # The index of the second point in the line has to be done modulo\n #\n l1_idx = antipodes[:,1] + 1\n needs_modulo = v2_idx == indexer.counts[0][indexer.rev_idx[antipodes[:,1]]] - 1\n l1_idx[needs_modulo] -= indexer.counts[0][indexer.rev_idx[antipodes[needs_modulo,1]]]\n l1 = chull_pts[l1_idx,:]\n #\n # Compute the distances\n #\n distances = distance2_to_line(v, l0, l1)\n min_distance = np.sqrt(fixup_scipy_ndimage_result(scind.minimum(distances, l, indexes)))\n min_distance[np.isnan(min_distance)] = 0\n return min_distance, max_distance", "response": "This function takes the convex hull data generated by convex_hull\n and returns the minimum and maximum Feret diameter for each object."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef is_obtuse(p1, v, p2):\n '''Determine whether the angle, p1 - v - p2 is obtuse\n \n p1 - N x 2 array of 
coordinates of first point on edge\n v - N x 2 array of vertex coordinates\n p2 - N x 2 array of coordinates of second point on edge\n \n returns vector of booleans\n '''\n p1x = p1[:,1]\n p1y = p1[:,0]\n p2x = p2[:,1]\n p2y = p2[:,0]\n vx = v[:,1]\n vy = v[:,0]\n Dx = vx - p2x\n Dy = vy - p2y\n Dvp1x = p1x - vx\n Dvp1y = p1y - vy\n return Dvp1x * Dx + Dvp1y * Dy > 0", "response": "Determine whether the angle p1 - v - p2 is obtuse\n ."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngetting the outline points of objects in clockwise order.", "response": "def get_outline_pts(labels, idxs):\n '''Get the outline points of objects in clockwise order\n \n Given a labels matrix of contiguously-labeled objects, trace\n the exteriors of those objects to get the points of the outline\n in clockwise order.\n \n labels - a labels matrix\n \n idxs - return points for the labels named by this array\n \n Returns an Nx2 array of points, a vector of offsets to the first point\n for each object and a vector of counts of points per\n indexed object.\n '''\n idxs = np.atleast_1d(idxs)\n if len(idxs) == 0:\n return np.zeros((0, 2), int), np.zeros(0, int), np.zeros(0, int)\n #\n # Do some preprocessing on the labels matrix to get a map of chosen labels\n #\n end_label = max(np.max(labels), np.max(idxs)) + 1\n chosen_labels = np.zeros(int(end_label), bool)\n chosen_labels[idxs] = True\n #\n # We trace clockwise, conceptually standing \n # to the left of the outline and putting our right hand on the\n # outline.\n # We remember the direction we are going and that gives\n # us an order for the points. For instance, if we are going\n # north:\n #\n # 2 3 4\n # 1 x 5\n # 0 7 6\n #\n # If \"1\" is available, our new direction is southwest:\n #\n # 5 6 7\n # 4 x 0\n # 3 2 1\n #\n # Take direction 0 to be northeast (i-1, j-1). 
We look in\n # this order:\n #\n # 3 4 5\n # 2 x 6\n # 1 0 7\n #\n # The directions are\n #\n # 0 1 2\n # 7 3\n # 6 5 4\n #\n traversal_order = np.array(\n # i j new direction\n (( 1, 0, 5 ),\n ( 1, -1, 6 ),\n ( 0, -1, 7 ),\n ( -1, -1, 0 ),\n ( -1, 0, 1 ),\n ( -1, 1, 2 ),\n ( 0, 1, 3 ),\n ( 1, 1, 4 )))\n #\n # Pad the labels matrix if any objects touch the border. This keeps\n # us from having to handle border cases. For many labels matrices,\n # no object touches the border\n #\n border = np.hstack((labels[0, :], labels[:, 0], labels[-1, :], labels[:, -1]))\n border = np.unique(border)\n if any([idx in border for idx in idxs]):\n big_labels = np.zeros((labels.shape[0]+2, labels.shape[1]+2), np.uint32)\n big_labels[1:-1, 1:-1] = labels\n offset = np.ones(2, int)\n labels = big_labels\n del big_labels\n else:\n labels = np.ascontiguousarray(labels, np.uint32)\n offset = np.zeros(2, int)\n assert isinstance(labels, np.ndarray)\n strides = np.array(labels.strides) // labels.dtype.itemsize\n assert strides[1] == 1, \"Last stride must be 1 for modulo unravel to work\"\n stride_table = np.ascontiguousarray(\n np.sum(traversal_order[:, :2] * strides, 1), np.int32)\n new_direction_table = np.ascontiguousarray(traversal_order[:, 2], np.int32)\n #\n # The worst case is a line - each point in the outline is visited twice.\n #\n consider_only_these = (\n (scind.grey_erosion(labels, footprint=np.ones((3,3), bool)) !=\n scind.grey_dilation(labels, footprint=np.ones((3,3), bool))) &\n chosen_labels[labels])\n max_output = np.sum(consider_only_these) * 2\n output = np.zeros(max_output, np.uint32)\n #\n # The algorithm starts at the northwest corner of every object. 
Find the\n # point with the minimum i**2 + j**2 for each object.\n #\n i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]\n rlabels = labels[consider_only_these]\n if len(rlabels) == 0:\n # Corner case - no pixels for *any* label\n return np.zeros((0, 2), int), np.zeros(len(idxs), int), np.zeros(len(idxs), int)\n i = i[consider_only_these]\n j = j[consider_only_these]\n d = i * i + j * j\n order = np.lexsort((d, rlabels))\n stride_pts = i[order] * strides[0] + j[order] * strides[1]\n rlabels = rlabels[order]\n\n first = np.hstack(([True], rlabels[:-1] != rlabels[1:]))\n first_strides = np.ascontiguousarray(stride_pts[first], np.uint32)\n idx_of_first = rlabels[first]\n output_count = np.zeros(len(idx_of_first), np.uint32)\n trace_outlines(labels.ravel(),\n first_strides,\n stride_table,\n new_direction_table,\n output,\n output_count)\n #\n # on output, we have to do some fixing up:\n #\n # need to convert output to i, j\n # need to arrange output_count in the same order as idxs\n #\n output = output[:np.sum(output_count)]\n mapper = np.zeros(np.max(idxs)+1, int)\n mapper[idxs] = np.arange(len(idxs))\n coord_offsets = np.zeros(len(idxs), output_count.dtype)\n coord_counts = np.zeros(len(idxs), output_count.dtype)\n coord_counts[mapper[idx_of_first]] = output_count\n if len(idx_of_first) > 1:\n coord_offsets[mapper[idx_of_first[1:]]] = np.cumsum(output_count[:-1],\n dtype=output_count.dtype)\n coords = np.column_stack((\n output // strides[0] - offset[0],\n output % strides[0] - offset[1]))\n return coords, coord_offsets, coord_counts"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncalculate the principal components of a vector of measurements and returns the correlation matrix that can be used to transform x into its components.", "response": "def princomp(x):\n \"\"\"Determine the principal components of a vector of measurements\n \n Determine the principal components of a vector of measurements\n x should be a M x N numpy 
array composed of M observations of n variables\n The output is:\n coeffs - the NxN correlation matrix that can be used to transform x into its components\n \n The code for this function is based on \"A Tutorial on Principal Component\n Analysis\", Shlens, 2005 http://www.snl.salk.edu/~shlens/pub/notes/pca.pdf\n (unpublished)\n \"\"\"\n \n (M,N) = x.shape\n Mean = x.mean(0)\n y = x - Mean\n cov = numpy.dot(y.transpose(),y) / (M-1)\n (V,PC) = numpy.linalg.eig(cov)\n order = (-V).argsort()\n coeff = PC[:,order]\n return coeff"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning an array of all non - identity pairs of n", "response": "def all_pairs(n):\n '''Return an (n*(n - 1)) x 2 array of all non-identity pairs of n things\n \n n - # of things\n \n The array is (cleverly) ordered so that the first m * (m - 1) elements\n can be used for m < n things:\n \n n = 3\n [[0, 1], # n = 2\n [1, 0], # n = 2\n [0, 2],\n [1, 2],\n [2, 0],\n [2, 1]]\n '''\n # Get all against all\n i, j = [x.flatten() for x in np.mgrid[0:n, 0:n]]\n # Eliminate the diagonal of i == j\n i, j = [x[i != j] for x in (i,j)]\n # Order by smallest of i or j first, then j then i for neatness\n order = np.lexsort((j, i, np.maximum(i, j)))\n return np.column_stack((i[order], j[order]))"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nnormalizes an image to make the minimum zero and maximum one Taxonomy", "response": "def stretch(image, mask=None):\n '''Normalize an image to make the minimum zero and maximum one\n\n image - pixel data to be normalized\n mask - optional mask of relevant pixels. 
None = don't mask\n\n returns the stretched image\n '''\n image = np.array(image, float)\n if np.product(image.shape) == 0:\n return image\n if mask is None:\n minval = np.min(image)\n maxval = np.max(image)\n if minval == maxval:\n if minval < 0:\n return np.zeros_like(image)\n elif minval > 1:\n return np.ones_like(image)\n return image\n else:\n return (image - minval) / (maxval - minval)\n else:\n significant_pixels = image[mask]\n if significant_pixels.size == 0:\n return image\n minval = np.min(significant_pixels)\n maxval = np.max(significant_pixels)\n if minval == maxval:\n transformed_image = minval\n else:\n transformed_image = ((significant_pixels - minval) /\n (maxval - minval))\n result = image.copy()\n image[mask] = transformed_image\n return image"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nmasking median filter with octagonal shape data - array of data to be median filtered.", "response": "def median_filter(data, mask, radius, percent=50):\n '''Masked median filter with octagonal shape\n\n data - array of data to be median filtered.\n mask - mask of significant pixels in data\n radius - the radius of a circle inscribed into the filtering octagon\n percent - conceptually, order the significant pixels in the octagon,\n count them and choose the pixel indexed by the percent\n times the count divided by 100. More simply, 50 = median\n returns a filtered array. 
In areas where the median filter does\n not overlap the mask, the filtered result is undefined, but in\n practice, it will be the lowest value in the valid area.\n '''\n if mask is None:\n mask = np.ones(data.shape, dtype=bool)\n if np.all(~ mask):\n return data.copy()\n #\n # Normalize the ranked data to 0-255\n #\n if (not np.issubdtype(data.dtype, np.int) or\n np.min(data) < 0 or np.max(data) > 255):\n ranked_data, translation = rank_order(data[mask], nbins=255)\n was_ranked = True\n else:\n ranked_data = data[mask]\n was_ranked = False\n input = np.zeros(data.shape, np.uint8 )\n input[mask] = ranked_data\n\n mmask = np.ascontiguousarray(mask, np.uint8)\n\n output = np.zeros(data.shape, np.uint8)\n\n _filter.median_filter(input, mmask, output, radius, percent)\n if was_ranked:\n result = translation[output]\n else:\n result = output\n return result"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef laplacian_of_gaussian(image, mask, size, sigma):\n '''Perform the Laplacian of Gaussian transform on the image\n\n image - 2-d image array\n mask - binary mask of significant pixels\n size - length of side of square kernel to use\n sigma - standard deviation of the Gaussian\n '''\n half_size = size//2\n i,j = np.mgrid[-half_size:half_size+1,\n -half_size:half_size+1].astype(float) / float(sigma)\n distance = (i**2 + j**2)/2\n gaussian = np.exp(-distance)\n #\n # Normalize the Gaussian\n #\n gaussian = gaussian / np.sum(gaussian)\n\n log = (distance - 1) * gaussian\n #\n # Normalize the kernel to have a sum of zero\n #\n log = log - np.mean(log)\n if mask is None:\n mask = np.ones(image.shape[:2], bool)\n masked_image = image.copy()\n masked_image[~mask] = 0\n output = convolve(masked_image, log, mode='constant', cval=0)\n #\n # Do the LoG of the inverse of the mask. This finds the magnitude of the\n # contribution of the masked pixels. 
We then fudge by multiplying by the\n # value at the pixel of interest - this effectively sets the value at a\n # masked pixel to that of the pixel of interest.\n #\n # It underestimates the LoG, that's not a terrible thing.\n #\n correction = convolve((~ mask).astype(float), log, mode='constant', cval = 1)\n output += correction * image\n output[~ mask] = image[~ mask]\n return output", "response": "Perform the Laplacian of Gaussian transform on the image array mask size sigma"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef canny(image, mask, sigma, low_threshold, high_threshold):\n '''Edge filter an image using the Canny algorithm.\n\n sigma - the standard deviation of the Gaussian used\n low_threshold - threshold for edges that connect to high-threshold\n edges\n high_threshold - threshold of a high-threshold edge\n\n Canny, J., A Computational Approach To Edge Detection, IEEE Trans.\n Pattern Analysis and Machine Intelligence, 8:679-714, 1986\n\n William Green's Canny tutorial\n http://www.pages.drexel.edu/~weg22/can_tut.html\n '''\n #\n # The steps involved:\n #\n # * Smooth using the Gaussian with sigma above.\n #\n # * Apply the horizontal and vertical Sobel operators to get the gradients\n # within the image. 
The edge strength is the sum of the magnitudes\n # of the gradients in each direction.\n #\n # * Find the normal to the edge at each point using the arctangent of the\n # ratio of the Y sobel over the X sobel - pragmatically, we can\n # look at the signs of X and Y and the relative magnitude of X vs Y\n # to sort the points into 4 categories: horizontal, vertical,\n # diagonal and antidiagonal.\n #\n # * Look in the normal and reverse directions to see if the values\n # in either of those directions are greater than the point in question.\n # Use interpolation to get a mix of points instead of picking the one\n # that's the closest to the normal.\n #\n # * Label all points above the high threshold as edges.\n # * Recursively label any point above the low threshold that is 8-connected\n # to a labeled point as an edge.\n #\n # Regarding masks, any point touching a masked point will have a gradient\n # that is \"infected\" by the masked point, so it's enough to erode the\n # mask by one and then mask the output. We also mask out the border points\n # because who knows what lies beyond the edge of the image?\n #\n fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')\n smoothed = smooth_with_function_and_mask(image, fsmooth, mask)\n jsobel = convolve(smoothed, [[-1,0,1],[-2,0,2],[-1,0,1]])\n isobel = convolve(smoothed, [[-1,-2,-1],[0,0,0],[1,2,1]])\n abs_isobel = np.abs(isobel)\n abs_jsobel = np.abs(jsobel)\n magnitude = np.sqrt(isobel*isobel + jsobel*jsobel)\n #\n # Make the eroded mask. 
Setting the border value to zero will wipe\n # out the image edges for us.\n #\n s = generate_binary_structure(2,2)\n emask = binary_erosion(mask, s, border_value = 0)\n emask = np.logical_and(emask, magnitude > 0)\n #\n #--------- Find local maxima --------------\n #\n # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,\n # 90-135 degrees and 135-180 degrees.\n #\n local_maxima = np.zeros(image.shape,bool)\n #----- 0 to 45 degrees ------\n pts_plus = np.logical_and(isobel >= 0,\n np.logical_and(jsobel >= 0,\n abs_isobel >= abs_jsobel))\n pts_minus = np.logical_and(isobel <= 0,\n np.logical_and(jsobel <= 0,\n abs_isobel >= abs_jsobel))\n pts = np.logical_or(pts_plus, pts_minus)\n pts = np.logical_and(emask, pts)\n # Get the magnitudes shifted left to make a matrix of the points to the\n # right of pts. Similarly, shift left and down to get the points to the\n # top right of pts.\n c1 = magnitude[1:,:][pts[:-1,:]]\n c2 = magnitude[1:,1:][pts[:-1,:-1]]\n m = magnitude[pts]\n w = abs_jsobel[pts] / abs_isobel[pts]\n c_plus = c2 * w + c1 * (1-w) <= m\n c1 = magnitude[:-1,:][pts[1:,:]]\n c2 = magnitude[:-1,:-1][pts[1:,1:]]\n c_minus = c2 * w + c1 * (1-w) <= m\n local_maxima[pts] = np.logical_and(c_plus, c_minus)\n #----- 45 to 90 degrees ------\n # Mix diagonal and vertical\n #\n pts_plus = np.logical_and(isobel >= 0,\n np.logical_and(jsobel >= 0,\n abs_isobel <= abs_jsobel))\n pts_minus = np.logical_and(isobel <= 0,\n np.logical_and(jsobel <= 0,\n abs_isobel <= abs_jsobel))\n pts = np.logical_or(pts_plus, pts_minus)\n pts = np.logical_and(emask, pts)\n c1 = magnitude[:,1:][pts[:,:-1]]\n c2 = magnitude[1:,1:][pts[:-1,:-1]]\n m = magnitude[pts]\n w = abs_isobel[pts] / abs_jsobel[pts]\n c_plus = c2 * w + c1 * (1-w) <= m\n c1 = magnitude[:,:-1][pts[:,1:]]\n c2 = magnitude[:-1,:-1][pts[1:,1:]]\n c_minus = c2 * w + c1 * (1-w) <= m\n local_maxima[pts] = np.logical_and(c_plus, c_minus)\n #----- 90 to 135 degrees ------\n # Mix anti-diagonal and vertical\n #\n 
pts_plus = np.logical_and(isobel <= 0,\n np.logical_and(jsobel >= 0,\n abs_isobel <= abs_jsobel))\n pts_minus = np.logical_and(isobel >= 0,\n np.logical_and(jsobel <= 0,\n abs_isobel <= abs_jsobel))\n pts = np.logical_or(pts_plus, pts_minus)\n pts = np.logical_and(emask, pts)\n c1a = magnitude[:,1:][pts[:,:-1]]\n c2a = magnitude[:-1,1:][pts[1:,:-1]]\n m = magnitude[pts]\n w = abs_isobel[pts] / abs_jsobel[pts]\n c_plus = c2a * w + c1a * (1.0-w) <= m\n c1 = magnitude[:,:-1][pts[:,1:]]\n c2 = magnitude[1:,:-1][pts[:-1,1:]]\n c_minus = c2 * w + c1 * (1.0-w) <= m\n cc = np.logical_and(c_plus,c_minus)\n local_maxima[pts] = np.logical_and(c_plus, c_minus)\n #----- 135 to 180 degrees ------\n # Mix anti-diagonal and anti-horizontal\n #\n pts_plus = np.logical_and(isobel <= 0,\n np.logical_and(jsobel >= 0,\n abs_isobel >= abs_jsobel))\n pts_minus = np.logical_and(isobel >= 0,\n np.logical_and(jsobel <= 0,\n abs_isobel >= abs_jsobel))\n pts = np.logical_or(pts_plus, pts_minus)\n pts = np.logical_and(emask, pts)\n c1 = magnitude[:-1,:][pts[1:,:]]\n c2 = magnitude[:-1,1:][pts[1:,:-1]]\n m = magnitude[pts]\n w = abs_jsobel[pts] / abs_isobel[pts]\n c_plus = c2 * w + c1 * (1-w) <= m\n c1 = magnitude[1:,:][pts[:-1,:]]\n c2 = magnitude[1:,:-1][pts[:-1,1:]]\n c_minus = c2 * w + c1 * (1-w) <= m\n local_maxima[pts] = np.logical_and(c_plus, c_minus)\n #\n #---- Create two masks at the two thresholds.\n #\n high_mask = np.logical_and(local_maxima, magnitude >= high_threshold)\n low_mask = np.logical_and(local_maxima, magnitude >= low_threshold)\n #\n # Segment the low-mask, then only keep low-segments that have\n # some high_mask component in them\n #\n labels,count = label(low_mask, np.ones((3,3),bool))\n if count == 0:\n return low_mask\n\n sums = fix(scind.sum(high_mask, labels, np.arange(count,dtype=np.int32)+1))\n good_label = np.zeros((count+1,),bool)\n good_label[1:] = sums > 0\n output_mask = good_label[labels]\n return output_mask", "response": "Edge filter an image using the 
Canny algorithm."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nfinding edges using the Roberts algorithm and return the magnitude of the output of the two Roberts convolution kernels.", "response": "def roberts(image, mask=None):\n '''Find edges using the Roberts algorithm\n\n image - the image to process\n mask - mask of relevant points\n\n The algorithm returns the magnitude of the output of the two Roberts\n convolution kernels.\n\n The following is the canonical citation for the algorithm:\n L. Roberts Machine Perception of 3-D Solids, Optical and\n Electro-optical Information Processing, MIT Press 1965.\n\n The following website has a tutorial on the algorithm:\n http://homepages.inf.ed.ac.uk/rbf/HIPR2/roberts.htm\n '''\n result = np.zeros(image.shape)\n #\n # Four quadrants and two convolutions:\n #\n # q0,0 | q0,1 1 | 0 anti-diagonal\n # q1,0 | q1,1 0 | -1\n #\n # q-1,0 | q0,0 0 | 1 diagonal\n # q-1,1 | q0,1 -1 | 0\n #\n # Points near the mask edges and image edges are computed unreliably\n # so make them zero (no edge) in the result\n #\n if mask is None:\n mask = np.ones(image.shape, bool)\n big_mask = binary_erosion(mask,\n generate_binary_structure(2,2),\n border_value = 0)\n result[big_mask==False] = 0\n q00 = image[:,:][big_mask]\n q11 = image[1:,1:][big_mask[:-1,:-1]]\n qm11 = image[:-1,1:][big_mask[1:,:-1]]\n diagonal = q00 - qm11\n anti_diagonal = q00 - q11\n result[big_mask] = np.sqrt(diagonal*diagonal + anti_diagonal*anti_diagonal)\n return result"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncalculating the absolute magnitude Sobel to find the edges of the image.", "response": "def sobel(image, mask=None):\n '''Calculate the absolute magnitude Sobel to find the edges\n\n image - image to process\n mask - mask of relevant points\n\n Take the square root of the sum of the squares of the horizontal and\n vertical Sobels to get a magnitude that's somewhat insensitive to\n direction.\n\n Note 
that scipy's Sobel returns a directional Sobel which isn't\n useful for edge detection in its raw form.\n '''\n return np.sqrt(hsobel(image,mask)**2 + vsobel(image,mask)**2)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nfind the edge magnitude using the Prewitt transform image - image to process mask - mask of relevant points to process mask of relevant points Returns the square root of the horizontal and vertical Prewitt transforms.", "response": "def prewitt(image, mask=None):\n '''Find the edge magnitude using the Prewitt transform\n\n image - image to process\n mask - mask of relevant points\n\n Return the square root of the sum of squares of the horizontal\n and vertical Prewitt transforms.\n '''\n return np.sqrt(hprewitt(image,mask)**2 + vprewitt(image,mask)**2)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef hprewitt(image, mask=None):\n '''Find the horizontal edges of an image using the Prewitt transform\n\n image - image to process\n mask - mask of relevant points\n\n We use the following kernel and return the absolute value of the\n result at each point:\n 1 1 1\n 0 0 0\n -1 -1 -1\n '''\n if mask is None:\n mask = np.ones(image.shape, bool)\n big_mask = binary_erosion(mask,\n generate_binary_structure(2,2),\n border_value = 0)\n result = np.abs(convolve(image, np.array([[ 1, 1, 1],\n [ 0, 0, 0],\n [-1,-1,-1]]).astype(float)/3.0))\n result[big_mask==False] = 0\n return result", "response": "Find the horizontal edges of an image using the Prewitt transform and return the absolute value of the image at each point"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef enhance_dark_holes(image, min_radius, max_radius, mask=None):\n '''Enhance dark holes using a rolling ball filter\n\n image - grayscale 2-d image\n radii - a vector of radii: we enhance holes at each given 
radius\n '''\n #\n # Do 4-connected erosion\n #\n se = np.array([[False, True, False],\n [True, True, True],\n [False, True, False]])\n #\n # Invert the intensities\n #\n inverted_image = image.max() - image\n previous_reconstructed_image = inverted_image\n eroded_image = inverted_image\n smoothed_image = np.zeros(image.shape)\n for i in range(max_radius+1):\n eroded_image = grey_erosion(eroded_image, mask=mask, footprint = se)\n reconstructed_image = grey_reconstruction(eroded_image, inverted_image,\n footprint = se)\n output_image = previous_reconstructed_image - reconstructed_image\n if i >= min_radius:\n smoothed_image = np.maximum(smoothed_image,output_image)\n previous_reconstructed_image = reconstructed_image\n return smoothed_image", "response": "Enhance dark holes using a rolling ball filter image - grayscale 2 - d image radii - a vector of radii that is used to filter the dark holes"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nblurs an image using a circular averaging filter", "response": "def circular_average_filter(image, radius, mask=None):\n '''Blur an image using a circular averaging filter (pillbox)\n\n image - grayscale 2-d image\n radii - radius of filter in pixels\n\n The filter will be within a square matrix of side 2*radius+1\n\n This code is translated straight from MATLAB's fspecial function\n '''\n\n crad = int(np.ceil(radius-0.5))\n x,y = np.mgrid[-crad:crad+1,-crad:crad+1].astype(float)\n maxxy = np.maximum(abs(x),abs(y))\n minxy = np.minimum(abs(x),abs(y))\n\n m1 = ((radius **2 < (maxxy+0.5)**2 + (minxy-0.5)**2)*(minxy-0.5) +\n (radius**2 >= (maxxy+0.5)**2 + (minxy-0.5)**2) *\n np.real(np.sqrt(np.asarray(radius**2 - (maxxy + 0.5)**2,dtype=complex))))\n m2 = ((radius**2 > (maxxy-0.5)**2 + (minxy+0.5)**2)*(minxy+0.5) +\n (radius**2 <= (maxxy-0.5)**2 + (minxy+0.5)**2)*\n np.real(np.sqrt(np.asarray(radius**2 - (maxxy - 0.5)**2,dtype=complex))))\n\n sgrid = ((radius**2*(0.5*(np.arcsin(m2/radius) - 
np.arcsin(m1/radius)) +\n 0.25*(np.sin(2*np.arcsin(m2/radius)) - np.sin(2*np.arcsin(m1/radius)))) -\n (maxxy-0.5)*(m2-m1) + (m1-minxy+0.5)) *\n ((((radius**2 < (maxxy+0.5)**2 + (minxy+0.5)**2) &\n (radius**2 > (maxxy-0.5)**2 + (minxy-0.5)**2)) |\n ((minxy == 0) & (maxxy-0.5 < radius) & (maxxy+0.5 >= radius)))) )\n\n sgrid = sgrid + ((maxxy+0.5)**2 + (minxy+0.5)**2 < radius**2)\n sgrid[crad,crad] = np.minimum(np.pi*radius**2,np.pi/2)\n if ((crad>0) and (radius > crad-0.5) and (radius**2 < (crad-0.5)**2+0.25)):\n m1 = np.sqrt(radius**2 - (crad - 0.5)**2)\n m1n = m1/radius\n sg0 = 2*(radius**2*(0.5*np.arcsin(m1n) + 0.25*np.sin(2*np.arcsin(m1n)))-m1*(crad-0.5))\n sgrid[2*crad,crad] = sg0\n sgrid[crad,2*crad] = sg0\n sgrid[crad,0] = sg0\n sgrid[0,crad] = sg0\n sgrid[2*crad-1,crad] = sgrid[2*crad-1,crad] - sg0\n sgrid[crad,2*crad-1] = sgrid[crad,2*crad-1] - sg0\n sgrid[crad,1] = sgrid[crad,1] - sg0\n sgrid[1,crad] = sgrid[1,crad] - sg0\n\n sgrid[crad,crad] = np.minimum(sgrid[crad,crad],1)\n kernel = sgrid/sgrid.sum()\n\n output = convolve(image, kernel, mode='constant')\n if mask is None:\n mask = np.ones(image.shape,np.uint8)\n else:\n mask = np.array(mask, np.uint8)\n output = masked_convolution(image, mask, kernel)\n output[mask==0] = image[mask==0]\n\n return output"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef velocity_kalman_model():\n '''Return a KalmanState set up to model objects with constant velocity\n\n The observation and measurement vectors are i,j.\n The state vector is i,j,vi,vj\n '''\n om = np.array([[1,0,0,0], [0, 1, 0, 0]])\n tm = np.array([[1,0,1,0],\n [0,1,0,1],\n [0,0,1,0],\n [0,0,0,1]])\n return KalmanState(om, tm)", "response": "Return a KalmanState set up to model objects with constant velocity"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a KalmanState set up to model going backwards in time", "response": "def reverse_velocity_kalman_model():\n '''Return 
a KalmanState set up to model going backwards in time'''\n om = np.array([[1,0,0,0], [0, 1, 0, 0]])\n tm = np.array([[1,0,-1,0],\n [0,1,0,-1],\n [0,0,1,0],\n [0,0,0,1]])\n return KalmanState(om, tm)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the kalman filter for the features in the new frame.", "response": "def kalman_filter(kalman_state, old_indices, coordinates, q, r):\n '''Return the kalman filter for the features in the new frame\n\n kalman_state - state from last frame\n\n old_indices - the index per feature in the last frame or -1 for new\n\n coordinates - Coordinates of the features in the new frame.\n\n q - the process error covariance - see equ 1.3 and 1.10 from Welch\n\n r - measurement error covariance of features - see eqn 1.7 and 1.8 from welch.\n\n returns a new KalmanState containing the kalman filter of\n the last state by the given coordinates.\n\n Refer to kalmanGainLinearMotion.m and\n http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf\n\n for info on the algorithm.\n '''\n assert isinstance(kalman_state, KalmanState)\n old_indices = np.array(old_indices)\n if len(old_indices) == 0:\n return KalmanState(kalman_state.observation_matrix,\n kalman_state.translation_matrix)\n #\n # Cull missing features in old state and collect only matching coords\n #\n matching = old_indices != -1\n new_indices = np.arange(len(old_indices))[~matching]\n retained_indices = np.arange(len(old_indices))[matching]\n new_coords = coordinates[new_indices]\n observation_matrix_t = kalman_state.observation_matrix.transpose()\n if len(retained_indices) > 0:\n kalman_state = kalman_state.deep_copy()\n coordinates = coordinates[retained_indices]\n kalman_state.map_frames(old_indices[retained_indices])\n #\n # Time update equations\n #\n # From eqn 1.9 of Welch\n #\n state_vec = kalman_state.predicted_state_vec\n #\n # From eqn 1.10 of Welch\n #\n state_cov = dot_n(\n dot_n(kalman_state.translation_matrix, kalman_state.state_cov),\n 
kalman_state.translation_matrix.transpose()) + q[matching]\n #\n # From eqn 1.11 of welch\n #\n kalman_gain_numerator = dot_n(state_cov, observation_matrix_t)\n\n kalman_gain_denominator = dot_n(\n dot_n(kalman_state.observation_matrix, state_cov),\n observation_matrix_t) + r[matching]\n kalman_gain_denominator = inv_n(kalman_gain_denominator)\n kalman_gain = dot_n(kalman_gain_numerator, kalman_gain_denominator)\n #\n # Eqn 1.12 of Welch\n #\n difference = coordinates - dot_n(kalman_state.observation_matrix,\n state_vec[:,:,np.newaxis])[:,:,0]\n state_noise = dot_n(kalman_gain, difference[:,:,np.newaxis])[:,:,0]\n state_vec = state_vec + state_noise\n #\n # Eqn 1.13 of Welch (factored from (I - KH)P to P - KHP)\n #\n state_cov = (state_cov -\n dot_n(dot_n(kalman_gain, kalman_state.observation_matrix),\n state_cov))\n #\n # Collect all of the state noise in one array. We produce an I and J\n # variance. Notes in kalmanGainLinearMotion indicate that you\n # might want a single variance, combining I & J. 
An alternate\n # might be R and theta, a variance of angular consistency and one\n # of absolute velocity.\n #\n # Add an index to the state noise in the rightmost column\n #\n idx = np.arange(len(state_noise))\n #\n # Stack the rows with the old ones\n #\n all_state_noise = np.vstack((kalman_state.state_noise, state_noise))\n all_state_noise_idx = np.hstack((kalman_state.state_noise_idx, idx))\n noise_var = np.zeros((len(idx), all_state_noise.shape[1]))\n for i in range(all_state_noise.shape[1]):\n noise_var[:, i] = fix(scind.variance(all_state_noise[:, i],\n all_state_noise_idx,\n idx))\n obs_vec = dot_n(kalman_state.observation_matrix,\n state_vec[:,:,np.newaxis])[:,:,0]\n kalman_state = KalmanState(kalman_state.observation_matrix,\n kalman_state.translation_matrix,\n state_vec, state_cov, noise_var,\n all_state_noise,\n all_state_noise_idx)\n else:\n # Erase all previous features\n kalman_state = KalmanState(kalman_state.observation_matrix,\n kalman_state.translation_matrix)\n if len(new_coords) > 0:\n #\n # Fill in the initial states:\n #\n state_vec = dot_n(observation_matrix_t,\n new_coords[:,:,np.newaxis])[:,:,0]\n #\n # The COV for the hidden, undetermined features should be large\n # and the COV for others should be small\n #\n nstates = kalman_state.state_len\n nnew_features = len(new_indices)\n cov_vec = SMALL_KALMAN_COV / np.dot(observation_matrix_t,\n np.ones(kalman_state.obs_len))\n cov_vec[~ np.isfinite(cov_vec)] = LARGE_KALMAN_COV\n cov_matrix = np.diag(cov_vec)\n state_cov = cov_matrix[np.newaxis,:,:][np.zeros(nnew_features,int)]\n #\n # The noise variance is all ones in Jaqman\n #\n noise_var = np.ones((len(new_indices), kalman_state.state_len))\n #\n # Map the retained indices to their new slots and new ones to empty\n # slots (=-1)\n #\n kalman_state.add_features(retained_indices,\n new_indices,\n state_vec, state_cov, noise_var)\n return kalman_state"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 
code\ndef line_integration(image, angle, decay, sigma):\n '''Integrate the image along the given angle\n\n DIC images are the directional derivative of the underlying\n image. This filter reconstructs the original image by integrating\n along that direction.\n\n image - a 2-dimensional array\n\n angle - shear angle in radians. We integrate perpendicular to this angle\n\n decay - an exponential decay applied to the integration\n\n sigma - the standard deviation of a Gaussian which is used to\n smooth the image in the direction parallel to the shear angle.\n '''\n #\n # Normalize the image so that the mean is zero\n #\n normalized = image - np.mean(image)\n #\n # Rotate the image so the J direction is perpendicular to the shear angle.\n #\n rotated = scind.rotate(normalized, -angle)\n #\n # Smooth in only the i direction\n #\n smoothed = scind.gaussian_filter1d(rotated, sigma) if sigma > 0 else rotated\n #\n # We want img_out[:,j+1] to be img_out[:,j] * decay + img[j+1]\n # Could be done by convolution with a ramp, maybe in FFT domain,\n # but we just do a bunch of steps here.\n #\n result_fwd = smoothed.copy()\n for i in range(1,result_fwd.shape[0]):\n result_fwd[i] += result_fwd[i-1] * decay\n result_rev = smoothed.copy()\n for i in reversed(range(result_rev.shape[0]-1)):\n result_rev[i] += result_rev[i+1] * decay\n result = (result_fwd - result_rev) / 2\n #\n # Rotate and chop result\n #\n result = scind.rotate(result, angle)\n ipad = int((result.shape[0] - image.shape[0]) / 2)\n jpad = int((result.shape[1] - image.shape[1]) / 2)\n result = result[ipad:(ipad + image.shape[0]),\n jpad:(jpad + image.shape[1])]\n #\n # Scale the resultant image similarly to the output.\n #\n img_min, img_max = np.min(image), np.max(image)\n result_min, result_max = np.min(result), np.max(result)\n if (img_min == img_max) or (result_min == result_max):\n return np.zeros(result.shape)\n result = (result - result_min) / (result_max - result_min)\n result = img_min + result * (img_max - 
img_min)\n return result", "response": "Integrate the image along the given angle and integrate the perpendicular to the shear angle."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncalculating a weighted variance of an image", "response": "def variance_transform(img, sigma, mask=None):\n '''Calculate a weighted variance of the image\n\n This function caluclates the variance of an image, weighting the\n local contributions by a Gaussian.\n\n img - image to be transformed\n sigma - standard deviation of the Gaussian\n mask - mask of relevant pixels in the image\n '''\n if mask is None:\n mask = np.ones(img.shape, bool)\n else:\n img = img.copy()\n img[~mask] = 0\n #\n # This is the Gaussian of the mask... so we can normalize for\n # pixels near the edge of the mask\n #\n gmask = scind.gaussian_filter(mask.astype(float), sigma,\n mode = 'constant')\n img_mean = scind.gaussian_filter(img, sigma,\n mode = 'constant') / gmask\n img_squared = scind.gaussian_filter(img ** 2, sigma,\n mode = 'constant') / gmask\n var = img_squared - img_mean ** 2\n return var"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef inv_n(x):\n '''given N matrices, return N inverses'''\n #\n # The inverse of a small matrix (e.g. 
3x3) is\n #\n # 1\n # ----- C(j,i)\n # det(A)\n #\n # where C(j,i) is the cofactor of matrix A at position j,i\n #\n assert x.ndim == 3\n assert x.shape[1] == x.shape[2]\n c = np.array([ [cofactor_n(x, j, i) * (1 - ((i+j) % 2)*2)\n for j in range(x.shape[1])]\n for i in range(x.shape[1])]).transpose(2,0,1)\n return c / det_n(x)[:, np.newaxis, np.newaxis]", "response": "given N matrices return N inverses"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef det_n(x):\n '''given N matrices, return N determinants'''\n assert x.ndim == 3\n assert x.shape[1] == x.shape[2]\n if x.shape[1] == 1:\n return x[:,0,0]\n result = np.zeros(x.shape[0])\n for permutation in permutations(np.arange(x.shape[1])):\n sign = parity(permutation)\n result += np.prod([x[:, i, permutation[i]]\n for i in range(x.shape[1])], 0) * sign\n sign = - sign\n return result", "response": "given N matrices return N determinants"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the cofactor of n matrices x at position i j", "response": "def cofactor_n(x, i, j):\n '''Return the cofactor of n matrices x[n,i,j] at position i,j\n\n The cofactor is the determinant of the matrix formed by removing\n row i and column j.\n '''\n m = x.shape[1]\n mr = np.arange(m)\n i_idx = mr[mr != i]\n j_idx = mr[mr != j]\n return det_n(x[:, i_idx[:, np.newaxis],\n j_idx[np.newaxis, :]])"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef dot_n(x, y):\n '''given two tensors N x I x K and N x K x J return N dot products\n\n If either x or y is 2-dimensional, broadcast it over all N.\n\n Dot products are size N x I x J.\n Example:\n x = np.array([[[1,2], [3,4], [5,6]],[[7,8], [9,10],[11,12]]])\n y = np.array([[[1,2,3], [4,5,6]],[[7,8,9],[10,11,12]]])\n print dot_n(x,y)\n\n array([[[ 9, 12, 15],\n [ 19, 26, 33],\n [ 29, 40, 51]],\n\n [[129, 144, 159],\n [163, 182, 201],\n [197, 220, 
243]]])\n '''\n if x.ndim == 2:\n if y.ndim == 2:\n return np.dot(x, y)\n x3 = False\n y3 = True\n nlen = y.shape[0]\n elif y.ndim == 2:\n nlen = x.shape[0]\n x3 = True\n y3 = False\n else:\n assert x.shape[0] == y.shape[0]\n nlen = x.shape[0]\n x3 = True\n y3 = True\n assert x.shape[1+x3] == y.shape[0+y3]\n n, i, j, k = np.mgrid[0:nlen, 0:x.shape[0+x3], 0:y.shape[1+y3],\n 0:y.shape[0+y3]]\n return np.sum((x[n, i, k] if x3 else x[i,k]) *\n (y[n, k, j] if y3 else y[k,j]), 3)", "response": "given two tensors N x I x K and N x J return N dot products"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef permutations(x):\n '''Given a listlike, x, return all permutations of x\n\n Returns the permutations of x in the lexical order of their indices:\n e.g.\n >>> x = [ 1, 2, 3, 4 ]\n >>> for p in permutations(x):\n >>> print p\n [ 1, 2, 3, 4 ]\n [ 1, 2, 4, 3 ]\n [ 1, 3, 2, 4 ]\n [ 1, 3, 4, 2 ]\n [ 1, 4, 2, 3 ]\n [ 1, 4, 3, 2 ]\n [ 2, 1, 3, 4 ]\n ...\n [ 4, 3, 2, 1 ]\n '''\n #\n # The algorithm is attributed to Narayana Pandit from his\n # Ganita Kaumundi (1356). The following is from\n #\n # http://en.wikipedia.org/wiki/Permutation#Systematic_generation_of_all_permutations\n #\n # 1. Find the largest index k such that a[k] < a[k + 1].\n # If no such index exists, the permutation is the last permutation.\n # 2. Find the largest index l such that a[k] < a[l].\n # Since k + 1 is such an index, l is well defined and satisfies k < l.\n # 3. Swap a[k] with a[l].\n # 4. 
Reverse the sequence from a[k + 1] up to and including the final\n # element a[n].\n #\n yield list(x) # don't forget to do the first one\n x = np.array(x)\n a = np.arange(len(x))\n while True:\n # 1 - find largest or stop\n ak_lt_ak_next = np.argwhere(a[:-1] < a[1:])\n if len(ak_lt_ak_next) == 0:\n raise StopIteration()\n k = ak_lt_ak_next[-1, 0]\n # 2 - find largest a[l] < a[k]\n ak_lt_al = np.argwhere(a[k] < a)\n l = ak_lt_al[-1, 0]\n # 3 - swap\n a[k], a[l] = (a[l], a[k])\n # 4 - reverse\n if k < len(x)-1:\n a[k+1:] = a[:k:-1].copy()\n yield x[a].tolist()", "response": "Given a listlike x return all permutations of x"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef convex_hull_transform(image, levels=256, mask = None,\n chunksize = CONVEX_HULL_CHUNKSIZE,\n pass_cutoff = 16):\n '''Perform the convex hull transform of this image\n\n image - image composed of integer intensity values\n levels - # of levels that we separate the image into\n mask - mask of points to consider or None to consider all points\n chunksize - # of points processed in first pass of convex hull\n\n for each intensity value, find the convex hull of pixels at or above\n that value and color all pixels within the hull with that value.\n '''\n # Scale the image into the requisite number of levels\n if mask is None:\n img_min = np.min(image)\n img_max = np.max(image)\n else:\n unmasked_pixels = image[mask]\n if len(unmasked_pixels) == 0:\n return np.zeros(image.shape, image.dtype)\n img_min = np.min(unmasked_pixels)\n img_max = np.max(unmasked_pixels)\n img_shape = tuple(image.shape)\n if img_min == img_max:\n return image\n\n scale = (img_min +\n np.arange(levels).astype(image.dtype) *\n (img_max - img_min) / float(levels-1))\n image = ((image - img_min) * (levels-1) / (img_max - img_min))\n if mask is not None:\n image[~mask] = 0\n #\n # If there are more than 16 levels, we do the method first at a coarse\n # scale. 
The dark objects can produce points at every level, so doing\n # two passes can reduce the number of points in the second pass to\n # only the difference between two levels at the coarse pass.\n #\n if levels > pass_cutoff:\n sub_levels = int(np.sqrt(levels))\n rough_image = convex_hull_transform(np.floor(image), sub_levels)\n image = np.maximum(image, rough_image)\n del rough_image\n image = image.astype(int)\n #\n # Get rid of any levels that have no representatives\n #\n unique = np.unique(image)\n new_values = np.zeros(levels, int)\n new_values[unique] = np.arange(len(unique))\n scale = scale[unique]\n image = new_values[image]\n #\n # Start by constructing the list of points which are local maxima\n #\n min_image = grey_erosion(image, footprint = np.ones((3,3), bool)).astype(int)\n #\n # Set the borders of the min_image to zero so that the border pixels\n # will be in all convex hulls below their intensity\n #\n min_image[0, :] = 0\n min_image[-1, :] = 0\n min_image[:, 0] = 0\n min_image[:, -1] = 0\n\n i,j = np.mgrid[0:image.shape[0], 0:image.shape[1]]\n mask = image > min_image\n\n i = i[mask]\n j = j[mask]\n min_image = min_image[mask]\n image = image[mask]\n #\n # Each point that is a maximum is a potential vertex in the convex hull\n # for each value above the minimum. Therefore, it appears\n #\n # image - min_image\n #\n # times in the i,j,v list of points. So we can do a sum to calculate\n # the number of points, then use cumsum to figure out the first index\n # in that array of points for each i,j,v. We can then use cumsum\n # again on the array of points to assign their levels.\n #\n count = image - min_image\n npoints = np.sum(count)\n # The index in the big array of the first point to place for each\n # point\n first_index_in_big = np.cumsum(count) - count\n #\n # The big array can be quite big, for example if there are lots of\n # thin, dark objects. 
We do two passes of convex hull: the convex hull\n # of the convex hulls of several regions is the convex hull of the whole\n # so it doesn't matter too much how we break up the array.\n #\n first_i = np.zeros(0, int)\n first_j = np.zeros(0, int)\n first_levels = np.zeros(0, int)\n chunkstart = 0\n while chunkstart < len(count):\n idx = first_index_in_big[chunkstart]\n iend = idx + chunksize\n if iend >= npoints:\n chunkend = len(count)\n iend = npoints\n else:\n chunkend = np.searchsorted(first_index_in_big, iend)\n if chunkend < len(count):\n iend = first_index_in_big[chunkend]\n else:\n iend = npoints\n chunk_first_index_in_big = first_index_in_big[chunkstart:chunkend] - idx\n chunkpoints = iend - idx\n #\n # For the big array, construct an array of indexes into the small array\n #\n index_in_small = np.zeros(chunkpoints, int)\n index_in_small[0] = chunkstart\n index_in_small[chunk_first_index_in_big[1:]] = 1\n index_in_small = np.cumsum(index_in_small)\n #\n # We're going to do a cumsum to make the big array of levels. Point\n # n+1 broadcasts its first value into first_index_in_big[n+1].\n # The value that precedes it is image[n]. Therefore, in order to\n # get the correct value in cumsum:\n #\n # ? + image[n] = min_image[n+1]+1\n # ? 
= min_image[n+1] + 1 - image[n]\n #\n levels = np.ones(chunkpoints, int)\n levels[0] = min_image[chunkstart] + 1\n levels[chunk_first_index_in_big[1:]] = \\\n min_image[chunkstart+1:chunkend] - image[chunkstart:chunkend-1] + 1\n levels = np.cumsum(levels)\n #\n # Construct the ijv\n #\n ijv = np.column_stack((i[index_in_small], j[index_in_small], levels))\n #\n # Get all of the convex hulls\n #\n pts, counts = convex_hull_ijv(ijv, np.arange(1, len(unique)))\n first_i = np.hstack((first_i, pts[:, 1]))\n first_j = np.hstack((first_j, pts[:, 2]))\n first_levels = np.hstack((first_levels, pts[:, 0]))\n chunkstart = chunkend\n #\n # Now do the convex hull of the reduced list of points\n #\n ijv = np.column_stack((first_i, first_j, first_levels))\n pts, counts = convex_hull_ijv(ijv, np.arange(1, len(unique)))\n #\n # Get the points along the lines described by the convex hulls\n #\n # There are N points for each label. Draw a line from each to\n # the next, except for the last which we draw from last to first\n #\n labels = pts[:, 0]\n i = pts[:, 1]\n j = pts[:, 2]\n first_index = np.cumsum(counts) - counts\n last_index = first_index + counts - 1\n next = np.arange(len(labels)) + 1\n next[last_index] = first_index\n index, count, i, j = get_line_pts(i, j, i[next], j[next])\n #\n # use a cumsum to get the index of each point from get_line_pts\n # relative to the labels vector\n #\n big_index = np.zeros(len(i), int)\n big_index[index[1:]] = 1\n big_index = np.cumsum(big_index)\n labels = labels[big_index]\n #\n # A given i,j might be represented more than once. Take the maximum\n # label at each i,j. First sort by i,j and label. Then take only values\n # that have a different i,j than the succeeding value. 
The last element\n # is always a winner.\n #\n order = np.lexsort((labels, i, j))\n i = i[order]\n j = j[order]\n labels = labels[order]\n mask = np.hstack((((i[1:] != i[:-1]) | (j[1:] != j[:-1])),[True]))\n i = i[mask]\n j = j[mask]\n labels = labels[mask]\n #\n # Now, we have an interesting object. It's ordered by j, then i which\n # means that we have scans of interesting i at each j. The points\n # that aren't represented should have the minimum of the values\n # above and below.\n #\n # We can play a cumsum trick to do this, placing the difference\n # of a point with its previous in the 2-d image, then summing along\n # each i axis to set empty values to the value of the nearest occupied\n # value above and a similar trick to set empty values to the nearest\n # value below. We then take the minimum of the two results.\n #\n first = np.hstack(([True], j[1:] != j[:-1]))\n top = np.zeros(img_shape, labels.dtype)\n top[i[first], j[first]] = labels[first]\n top[i[~first], j[~first]] = (labels[1:] - labels[:-1])[~first[1:]]\n top = np.cumsum(top, 0)\n\n # From 0 to the location of the first point, set to value of first point\n bottom = np.zeros(img_shape, labels.dtype)\n bottom[0, j[first]] = labels[first]\n # From 1 + the location of the previous point, set to the next point\n last = np.hstack((first[1:], [True]))\n bottom[i[:-1][~first[1:]]+1, j[~first]] = (labels[1:] - labels[:-1])[~first[1:]]\n # Set 1 + the location of the last point to -labels so that all past\n # the end will be zero (check for i at end...)\n llast = last & (i < img_shape[0] - 1)\n bottom[i[llast]+1, j[llast]] = - labels[llast]\n bottom = np.cumsum(bottom, 0)\n image = np.minimum(top, bottom)\n return scale[image]", "response": "Perform the convex hull transform of this image"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncalculates the hessian of the image at each pixel.", "response": "def hessian(image, return_hessian=True, return_eigenvalues=True, 
return_eigenvectors=True):\n '''Calculate hessian, its eigenvalues and eigenvectors\n\n image - n x m image. Smooth the image with a Gaussian to get derivatives\n at different scales.\n\n return_hessian - true to return an n x m x 2 x 2 matrix of the hessian\n at each pixel\n\n return_eigenvalues - true to return an n x m x 2 matrix of the eigenvalues\n of the hessian at each pixel\n\n return_eigenvectors - true to return an n x m x 2 x 2 matrix of the\n eigenvectors of the hessian at each pixel\n\n The values of the border pixels for the image are not calculated and\n are zero\n '''\n #The Hessian, d(f(x0, x1))/dxi/dxj for i,j = [0,1] is approximated by the\n #following kernels:\n\n #d00: [[1], [-2], [1]]\n #d11: [[1, -2, 1]]\n #d01 and d10: [[ 1, 0,-1],\n #[ 0, 0, 0],\n #[ -1, 0, 1]] / 2\n\n\n #The eigenvalues of the hessian:\n #[[d00, d01]\n #[d01, d11]]\n #L1 = (d00 + d11) / 2 + ((d00 + d11)**2 / 4 - (d00 * d11 - d01**2)) ** .5\n #L2 = (d00 + d11) / 2 - ((d00 + d11)**2 / 4 - (d00 * d11 - d01**2)) ** .5\n\n #The eigenvectors of the hessian:\n #if d01 != 0:\n #[(L1 - d11, d01), (L2 - d11, d01)]\n #else:\n #[ (1, 0), (0, 1) ]\n\n\n #Ideas and code borrowed from:\n #http://www.math.harvard.edu/archive/21b_fall_04/exhibits/2dmatrices/index.html\n #http://www.longair.net/edinburgh/imagej/tubeness/\n\n\n hessian = np.zeros((image.shape[0], image.shape[1], 2, 2))\n hessian[1:-1, :, 0, 0] = image[:-2, :] - (2 * image[1:-1, :]) + image[2:, :]\n hessian[1:-1, 1:-1, 0, 1] = hessian[1:-1, 1:-1, 0, 1] = (\n image[2:, 2:] + image[:-2, :-2] -\n image[2:, :-2] - image[:-2, 2:]) / 4\n hessian[:, 1:-1, 1, 1] = image[:, :-2] - (2 * image[:, 1:-1]) + image[:, 2:]\n #\n # Solve the eigenvalue equation:\n # H x = L x\n #\n # Much of this from Eigensystem2x2Float.java from tubeness\n #\n A = hessian[:, :, 0, 0]\n B = hessian[:, :, 0, 1]\n C = hessian[:, :, 1, 1]\n\n b = -(A + C)\n c = A * C - B * B\n discriminant = b * b - 4 * c\n\n # pn is something that broadcasts over all points 
and either adds or\n # subtracts the +/- part of the eigenvalues\n\n pn = np.array([1, -1])[np.newaxis, np.newaxis, :]\n L = (- b[:, :, np.newaxis] +\n (np.sqrt(discriminant)[:, :, np.newaxis] * pn)) / 2\n #\n # Report eigenvalue # 0 as the one with the highest absolute magnitude\n #\n L[np.abs(L[:, :, 1]) > np.abs(L[:, :, 0]), :] =\\\n L[np.abs(L[:, :, 1]) > np.abs(L[:, :, 0]), ::-1]\n\n\n if return_eigenvectors:\n #\n # Calculate for d01 != 0\n #\n v = np.ones((image.shape[0], image.shape[1], 2, 2)) * np.nan\n v[:, :, :, 0] = L - hessian[:, :, 1, 1, np.newaxis]\n v[:, :, :, 1] = hessian[:, :, 0, 1, np.newaxis]\n #\n # Calculate for d01 = 0\n default = np.array([[1, 0], [0, 1]])[np.newaxis, :, :]\n v[hessian[:, :, 0, 1] == 0] = default\n #\n # Normalize the vectors\n #\n d = np.sqrt(np.sum(v * v, 3))\n v /= d[:, :, :, np.newaxis]\n\n result = []\n if return_hessian:\n result.append(hessian)\n if return_eigenvalues:\n result.append(L)\n if return_eigenvectors:\n result.append(v)\n if len(result) == 0:\n return\n elif len(result) == 1:\n return result[0]\n return tuple(result)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef poisson_equation(image, gradient=1, max_iter=100, convergence=.01, percentile = 90.0):\n '''Estimate the solution to the Poisson Equation\n\n The Poisson Equation is the solution to gradient(x) = h^2/4 and, in this\n context, we use a boundary condition where x is zero for background\n pixels. 
Also, we set h^2/4 = 1 to indicate that each pixel is a distance\n of 1 from its neighbors.\n\n The estimation exits after max_iter iterations or if the given percentile\n of foreground pixels differ by less than the convergence fraction\n from one pass to the next.\n\n Some ideas taken from Gorelick, \"Shape representation and classification\n using the Poisson Equation\", IEEE Transactions on Pattern Analysis and\n Machine Intelligence V28, # 12, 2006\n\n image - binary image with foreground as True\n gradient - the target gradient between 4-adjacent pixels\n max_iter - maximum # of iterations at a given level\n convergence - target fractional difference between values from previous\n and next pass\n percentile - measure convergence at this percentile\n '''\n # Evaluate the poisson equation with zero-padded boundaries\n pe = np.zeros((image.shape[0]+2, image.shape[1]+2))\n if image.shape[0] > 64 and image.shape[1] > 64:\n #\n # Sub-sample to get seed values\n #\n sub_image = image[::2, ::2]\n sub_pe = poisson_equation(sub_image,\n gradient=gradient*2,\n max_iter=max_iter,\n convergence=convergence)\n coordinates = np.mgrid[0:(sub_pe.shape[0]*2),\n 0:(sub_pe.shape[1]*2)].astype(float) / 2\n pe[1:(sub_image.shape[0]*2+1), 1:(sub_image.shape[1]*2+1)] = \\\n scind.map_coordinates(sub_pe, coordinates, order=1)\n pe[:image.shape[0], :image.shape[1]][~image] = 0\n else:\n pe[1:-1,1:-1] = image\n #\n # evaluate only at i and j within the foreground\n #\n i, j = np.mgrid[0:pe.shape[0], 0:pe.shape[1]]\n mask = (i>0) & (i0) & (j 0:\n self.state_vec = self.state_vec[old_indices]\n self.state_cov = self.state_cov[old_indices]\n self.noise_var = self.noise_var[old_indices]\n if self.has_cached_obs_vec:\n self.obs_vec = self.obs_vec[old_indices]\n if self.has_cached_predicted_state_vec:\n self.p_state_vec = self.p_state_vec[old_indices]\n if len(self.state_noise_idx) > 0:\n #\n # We have to renumber the new_state_noise indices and get rid\n # of those that don't map to numbers. 
Typical index trick here:\n # * create an array for each legal old element: -1 = no match\n # * give each old element in the array the new number\n # * Filter out the \"no match\" elements.\n #\n reverse_indices = -np.ones(noldfeatures, int)\n reverse_indices[old_indices] = np.arange(nfeatures)\n self.state_noise_idx = reverse_indices[self.state_noise_idx]\n self.state_noise = self.state_noise[self.state_noise_idx != -1,:]\n self.state_noise_idx = self.state_noise_idx[self.state_noise_idx != -1]", "response": "Map the feature indexes based on the next frame s identities\nCOOKIE."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nadds new features to the state object.", "response": "def add_features(self, kept_indices, new_indices,\n new_state_vec, new_state_cov, new_noise_var):\n '''Add new features to the state\n\n kept_indices - the mapping from all indices in the state to new\n indices in the new version\n\n new_indices - the indices of the new features in the new version\n\n new_state_vec - the state vectors for the new indices\n\n new_state_cov - the covariance matrices for the new indices\n\n new_noise_var - the noise variances for the new indices\n '''\n assert len(kept_indices) == len(self.state_vec)\n assert len(new_indices) == len(new_state_vec)\n assert len(new_indices) == len(new_state_cov)\n assert len(new_indices) == len(new_noise_var)\n\n if self.has_cached_obs_vec:\n del self.obs_vec\n if self.has_cached_predicted_state_vec:\n del self.predicted_obs_vec\n\n nfeatures = len(kept_indices) + len(new_indices)\n next_state_vec = np.zeros((nfeatures, self.state_len))\n next_state_cov = np.zeros((nfeatures, self.state_len, self.state_len))\n next_noise_var = np.zeros((nfeatures, self.state_len))\n\n if len(kept_indices) > 0:\n next_state_vec[kept_indices] = self.state_vec\n next_state_cov[kept_indices] = self.state_cov\n next_noise_var[kept_indices] = self.noise_var\n if len(self.state_noise_idx) > 0:\n 
self.state_noise_idx = kept_indices[self.state_noise_idx]\n if len(new_indices) > 0:\n next_state_vec[new_indices] = new_state_vec\n next_state_cov[new_indices] = new_state_cov\n next_noise_var[new_indices] = new_noise_var\n self.state_vec = next_state_vec\n self.state_cov = next_state_cov\n self.noise_var = next_noise_var"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a deep copy of the state", "response": "def deep_copy(self):\n '''Return a deep copy of the state'''\n c = KalmanState(self.observation_matrix, self.translation_matrix)\n c.state_vec = self.state_vec.copy()\n c.state_cov = self.state_cov.copy()\n c.noise_var = self.noise_var.copy()\n c.state_noise = self.state_noise.copy()\n c.state_noise_idx = self.state_noise_idx.copy()\n return c"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef prcntiles(x,percents):\n '''Equivalent to matlab prctile(x,p), uses linear interpolation.'''\n x=np.array(x).flatten()\n listx = np.sort(x)\n xpcts=[]\n lenlistx=len(listx)\n refs=[]\n for i in range(0,lenlistx):\n r=100*((.5+i)/lenlistx) #refs[i] is percentile of listx[i] in matrix x\n refs.append(r)\n\n rpcts=[]\n for p in percents:\n if prefs[-1]:\n rpcts.append(listx[-1])\n else:\n for j in range(0,lenlistx): #lenlistx=len(refs)\n if refs[j]<=p and refs[j+1]>=p:\n my=listx[j+1]-listx[j]\n mx=refs[j+1]-refs[j]\n m=my/mx #slope of line between points\n rpcts.append((m*(p-refs[j]))+listx[j])\n break\n xpcts.append(rpcts)\n return np.array(xpcts).transpose()", "response": "Equivalent to matlab prctile ( x p uses linear interpolation."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn spline factors for a single sequence", "response": "def spline_factors(u):\n '''u is np.array'''\n\n X = np.array([(1.-u)**3 , 4-(6.*(u**2))+(3.*(u**3)) , 1.+(3.*u)+(3.*(u**2))-(3.*(u**3)) , u**3]) * (1./6)\n \n return X"} {"SOURCE": "codesearchnet", "instruction": 
"Implement a function in Python 3 to\nreturn the number of entries in a picklist that is larger than val.", "response": "def pick(picklist,val):\n '''Index to first value in picklist that is larger than val.\nIf none is larger, index=len(picklist).'''\n \n assert np.all(np.sort(picklist) == picklist), \"pick list is not ordered correctly\"\n val = np.array(val)\n i_pick, i_val = np.mgrid[0:len(picklist),0:len(val)]\n #\n # Mark a picklist entry as 1 if the value is before or at,\n # mark it as zero if it is afterward\n #\n is_not_larger = picklist[i_pick] <= val[i_val]\n #\n # The index is the number of entries that are 1\n #\n p = np.sum(is_not_larger, 0)\n \n return p"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nconfine x to [ low high ). Values outside are set to low / high. See also restrict.", "response": "def confine(x,low,high):\n '''Confine x to [low,high]. Values outside are set to low/high.\nSee also restrict.'''\n\n y=x.copy()\n y[y < low] = low\n y[y > high] = high\n return y"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef gauss(x,m_y,sigma):\n '''returns the gaussian with mean m_y and std. dev. sigma,\ncalculated at the points of x.'''\n\n e_y = [np.exp((1.0/(2*float(sigma)**2)*-(n-m_y)**2)) for n in np.array(x)]\n y = [1.0/(float(sigma) * np.sqrt(2 * np.pi)) * e for e in e_y]\n \n return np.array(y)", "response": "returns the gaussian with mean m_y and std. dev. 
sigma at the points of x."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the second derivative of the gaussian with mean m_y and standard deviation sigma calculated at the points of x.", "response": "def d2gauss(x,m_y,sigma):\n '''returns the second derivative of the gaussian with mean m_y,\nand standard deviation sigma, calculated at the points of x.'''\n\n return gauss(x,m_y,sigma)*[-1/sigma**2 + (n-m_y)**2/sigma**4 for n in x]"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef spline_matrix2d(x,y,px,py,mask=None):\n '''For boundary constraints, the first two and last two spline pieces are constrained\nto be part of the same cubic curve.'''\n V = np.kron(spline_matrix(x,px),spline_matrix(y,py))\n \n lenV = len(V)\n \n if mask is not None:\n indices = np.nonzero(mask.T.flatten())\n if len(indices)>1:\n indices = np.nonzero(mask.T.flatten())[1][0]\n newV=V.T[indices]\n V=newV.T\n \n V=V.reshape((V.shape[0],V.shape[1]))\n \n return V", "response": "For boundary constraints the first two and last two spline pieces are constrained\nto be part of the same cubic curve."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nmaking a least squares fit of the spline to the surface x y z.", "response": "def splinefit2d(x, y, z, px, py, mask=None):\n '''Make a least squares fit of the spline (px,py,pz) to the surface (x,y,z).\nIf mask is given, only masked points are used for the regression.'''\n \n if mask is None:\n V = np.array(spline_matrix2d(x, y, px, py))\n a = np.array(z.T.flatten())\n pz = np.linalg.lstsq(V.T, a.T)[0].T\n else:\n V = np.array(spline_matrix2d(x,y,px,py,mask))\n indices = np.nonzero(np.array(mask).T.flatten())\n if len(indices[0])==0:\n pz = np.zeros((len(py),len(px)))\n #indices is empty when mask changes to all zeros\n else:\n a = np.array((z.T.flatten()[indices[0]]))\n pz = np.linalg.lstsq(V.T, a.T)[0].T\n \n 
pz=pz.reshape((len(py),len(px)))\n return pz.transpose()"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef bg_compensate(img, sigma, splinepoints, scale):\n '''Reads file, subtracts background. Returns [compensated image, background].'''\n \n from PIL import Image\n import pylab\n from matplotlib.image import pil_to_array\n from centrosome.filter import canny\n import matplotlib\n\n img = Image.open(img)\n if img.mode=='I;16':\n # 16-bit image\n # deal with the endianness explicitly... I'm not sure\n # why PIL doesn't get this right.\n imgdata = np.fromstring(img.tostring(),np.uint8)\n imgdata.shape=(int(imgdata.shape[0]/2),2)\n imgdata = imgdata.astype(np.uint16)\n hi,lo = (0,1) if img.tag.prefix == 'MM' else (1,0)\n imgdata = imgdata[:,hi]*256 + imgdata[:,lo]\n img_size = list(img.size)\n img_size.reverse()\n new_img = imgdata.reshape(img_size)\n # The magic # for maximum sample value is 281\n if 281 in img.tag:\n img = new_img.astype(np.float32) / img.tag[281][0]\n elif np.max(new_img) < 4096:\n img = new_img.astype(np.float32) / 4095.\n else:\n img = new_img.astype(np.float32) / 65535.\n else:\n img = pil_to_array(img)\n \n pylab.subplot(1,3,1).imshow(img, cmap=matplotlib.cm.Greys_r)\n pylab.show()\n \n if len(img.shape)>2:\n raise ValueError('Image must be grayscale')\n\n## Create mask that will fix problem when image has black areas outside of well\n edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)\n ci = np.cumsum(edges, 0)\n cj = np.cumsum(edges, 1)\n i,j = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n mask = ci > 0\n mask = mask & (cj > 0)\n mask[1:,:] &= (ci[0:-1,:] < ci[-1,j[0:-1,:]])\n mask[:,1:] &= (cj[:,0:-1] < cj[i[:,0:-1],-1])\n \n import time\n t0 = time.clock()\n bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)\n print(\"Executed in %f sec\" % (time.clock() - t0))\n bg[~mask] = img[~mask]\n\n pylab.subplot(1,3,2).imshow(img - bg, 
cmap=matplotlib.cm.Greys_r)\n pylab.subplot(1,3,3).imshow(bg, cmap=matplotlib.cm.Greys_r)\n pylab.show()", "response": "Reads file subtracts background. Returns [ compensated image background."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef mode(a):\n '''Compute the mode of an array\n \n a: an array\n \n returns a vector of values which are the most frequent (more than one\n if there is a tie).\n '''\n a = np.asanyarray(a)\n if a.size == 0:\n return np.zeros(0, a.dtype)\n aa = a.flatten()\n aa.sort()\n indices = np.hstack([[0], np.where(aa[:-1] != aa[1:])[0]+1, [aa.size]])\n counts = indices[1:] - indices[:-1]\n best_indices = indices[:-1][counts == np.max(counts)]\n return aa[best_indices]", "response": "Compute the mode of an array\n \n returns a vector of values which are the most frequent."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef otsu(data, min_threshold=None, max_threshold=None,bins=256):\n assert min_threshold is None or max_threshold is None or min_threshold < max_threshold\n def constrain(threshold):\n if not min_threshold is None and threshold < min_threshold:\n threshold = min_threshold\n if not max_threshold is None and threshold > max_threshold:\n threshold = max_threshold\n return threshold\n \n data = np.atleast_1d(data)\n data = data[~ np.isnan(data)]\n if len(data) == 0:\n return (min_threshold if not min_threshold is None\n else max_threshold if not max_threshold is None\n else 0)\n elif len(data) == 1:\n return constrain(data[0])\n if bins > len(data):\n bins = len(data)\n data.sort()\n var = running_variance(data)\n rvar = np.flipud(running_variance(np.flipud(data))) \n thresholds = data[1:len(data):len(data)//bins]\n score_low = (var[0:len(data)-1:len(data)//bins] * \n np.arange(0,len(data)-1,len(data)//bins))\n score_high = (rvar[1:len(data):len(data)//bins] *\n (len(data) - 
np.arange(1,len(data),len(data)//bins)))\n scores = score_low + score_high\n if len(scores) == 0:\n return constrain(thresholds[0])\n index = np.argwhere(scores == scores.min()).flatten()\n if len(index)==0:\n return constrain(thresholds[0])\n #\n # Take the average of the thresholds to either side of\n # the chosen value to get an intermediate in cases where there is\n # a steep step between the background and foreground\n index = index[0]\n if index == 0:\n index_low = 0\n else:\n index_low = index-1\n if index == len(thresholds)-1:\n index_high = len(thresholds)-1\n else:\n index_high = index+1 \n return constrain((thresholds[index_low]+thresholds[index_high]) / 2)", "response": "Compute a threshold using Otsu s method\n \n "} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef entropy(data, bins=256):\n \n data = np.atleast_1d(data)\n data = data[~ np.isnan(data)]\n if len(data) == 0:\n return 0\n elif len(data) == 1:\n return data[0]\n\n if bins > len(data):\n bins = len(data)\n data.sort()\n var = running_variance(data)+1.0/512.0\n rvar = np.flipud(running_variance(np.flipud(data)))+1.0/512.0 \n thresholds = data[1:len(data):len(data)//bins]\n w = np.arange(0,len(data)-1,len(data)//bins)\n score_low = w * np.log(var[0:len(data)-1:len(data)//bins] *\n w * np.sqrt(2*np.pi*np.exp(1)))\n score_low[np.isnan(score_low)]=0\n \n w = len(data) - np.arange(1,len(data),len(data)//bins)\n score_high = w * np.log(rvar[1:len(data):len(data)//bins] * w *\n np.sqrt(2*np.pi*np.exp(1)))\n score_high[np.isnan(score_high)]=0\n scores = score_low + score_high\n index = np.argwhere(scores == scores.min()).flatten()\n if len(index)==0:\n return thresholds[0]\n #\n # Take the average of the thresholds to either side of\n # the chosen value to get an intermediate in cases where there is\n # a steep step between the background and foreground\n index = index[0]\n if index == 0:\n index_low = 0\n else:\n index_low = index-1\n if 
index == len(thresholds)-1:\n index_high = len(thresholds)-1\n else:\n index_high = index+1 \n return (thresholds[index_low]+thresholds[index_high]) / 2", "response": "Compute a threshold using Ray s entropy measurement\n data - an array of intensity values between zero and one\n bins - an array of bins that are used to bin the data into this many equally - spaced bins"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef otsu3(data, min_threshold=None, max_threshold=None,bins=128): \n assert min_threshold is None or max_threshold is None or min_threshold < max_threshold\n \n #\n # Compute the running variance and reverse running variance.\n # \n data = np.atleast_1d(data)\n data = data[~ np.isnan(data)]\n data.sort()\n if len(data) == 0:\n return 0\n var = running_variance(data)\n rvar = np.flipud(running_variance(np.flipud(data)))\n if bins > len(data):\n bins = len(data)\n bin_len = int(len(data)//bins) \n thresholds = data[0:len(data):bin_len]\n score_low = (var[0:len(data):bin_len] * \n np.arange(0,len(data),bin_len))\n score_high = (rvar[0:len(data):bin_len] *\n (len(data) - np.arange(0,len(data),bin_len)))\n #\n # Compute the middles\n #\n cs = data.cumsum()\n cs2 = (data**2).cumsum()\n i,j = np.mgrid[0:score_low.shape[0],0:score_high.shape[0]]*bin_len\n diff = (j-i).astype(float)\n w = diff\n mean = (cs[j] - cs[i]) / diff\n mean2 = (cs2[j] - cs2[i]) / diff\n score_middle = w * (mean2 - mean**2)\n score_middle[i >= j] = np.Inf\n score = score_low[i*bins//len(data)] + score_middle + score_high[j*bins//len(data)]\n best_score = np.min(score)\n best_i_j = np.argwhere(score==best_score)\n return (thresholds[best_i_j[0,0]],thresholds[best_i_j[0,1]])", "response": "Compute a threshold using a 3 - category Otsu - like method."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncompute the entropy score given a variance and # of bins", "response": "def entropy_score(var,bins, 
w=None, decimate=True):\n '''Compute entropy scores, given a variance and # of bins\n \n '''\n if w is None:\n n = len(var)\n w = np.arange(0,n,n//bins) / float(n)\n if decimate:\n n = len(var)\n var = var[0:n:n//bins]\n score = w * np.log(var * w * np.sqrt(2*np.pi*np.exp(1)))\n score[np.isnan(score)]=np.Inf\n return score"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngiving a vector x compute the variance for x", "response": "def running_variance(x):\n '''Given a vector x, compute the variance for x[0:i]\n \n Thank you http://www.johndcook.com/standard_deviation.html\n S[i] = S[i-1]+(x[i]-mean[i-1])*(x[i]-mean[i])\n var(i) = S[i] / (i-1)\n '''\n n = len(x)\n # The mean of x[0:i]\n m = x.cumsum() / np.arange(1,n+1)\n # x[i]-mean[i-1] for i=1...\n x_minus_mprev = x[1:]-m[:-1]\n # x[i]-mean[i] for i=1...\n x_minus_m = x[1:]-m[1:]\n # s for i=1...\n s = (x_minus_mprev*x_minus_m).cumsum()\n var = s / np.arange(1,n)\n # Prepend Inf so we have a variance for x[0]\n return np.hstack(([0],var))"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef outline(labels):\n \n output = numpy.zeros(labels.shape, labels.dtype)\n lr_different = labels[1:,:]!=labels[:-1,:]\n ud_different = labels[:,1:]!=labels[:,:-1]\n d1_different = labels[1:,1:]!=labels[:-1,:-1]\n d2_different = labels[1:,:-1]!=labels[:-1,1:]\n different = numpy.zeros(labels.shape, bool)\n different[1:,:][lr_different] = True\n different[:-1,:][lr_different] = True\n different[:,1:][ud_different] = True\n different[:,:-1][ud_different] = True\n different[1:,1:][d1_different] = True\n different[:-1,:-1][d1_different] = True\n different[1:,:-1][d2_different] = True\n different[:-1,1:][d2_different] = True\n #\n # Labels on edges need outlines\n #\n different[0,:] = True\n different[:,0] = True\n different[-1,:] = True\n different[:,-1] = True\n \n output[different] = labels[different]\n return output", "response": "Given a label 
matrix return a matrix of the outlines of the labeled objects."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncomputes the Euclidean distance between two points.", "response": "def euclidean_dist(point1, point2):\n \"\"\"Compute the Euclidean distance between two points.\n\n Parameters\n ----------\n point1, point2 : 2-tuples of float\n The input points.\n\n Returns\n -------\n d : float\n The distance between the input points.\n\n Examples\n --------\n >>> point1 = (1.0, 2.0)\n >>> point2 = (4.0, 6.0) # (3., 4.) away, simplest Pythagorean triangle\n >>> euclidean_dist(point1, point2)\n 5.0\n \"\"\"\n (x1, y1) = point1\n (x2, y2) = point2\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef from_labels(labels):\n labels = labels.astype(int)\n\n areas = scipy.ndimage.measurements.sum(labels != 0, labels, list(range(1, numpy.max(labels) + 1)))\n\n existing_labels = [i for (i, a) in enumerate(areas, 1) if a > 0]\n\n existing_areas = [a for a in areas if a > 0]\n\n existing_centers = scipy.ndimage.measurements.center_of_mass(labels != 0, labels, existing_labels)\n\n zipped = zip(existing_labels, existing_centers, existing_areas)\n\n features = [CellFeatures(c, a, i, labels.shape)\n for i, c, a in zipped if a != 0]\n\n return features", "response": "Creates a list of cell features based on label image"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a list of traces from given assignment and cell data.", "response": "def from_detections_assignment(detections_1, detections_2, assignments):\n \"\"\"\n Creates traces out of given assignment and cell data.\n \"\"\"\n traces = []\n\n for d1n, d2n in six.iteritems(assignments):\n # check if the match is between existing cells\n if d1n < len(detections_1) and d2n < len(detections_2):\n traces.append(Trace(detections_1[d1n], 
detections_2[d2n]))\n\n return traces"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nrunning the tracking algorithm.", "response": "def run_tracking(self, label_image_1, label_image_2):\n \"\"\"\n Tracks cells between input label images.\n\n @returns: injective function from old objects to new objects (pairs of [old, new]). Number are compatible with labels.\n \"\"\"\n self.scale = self.parameters_tracking[\"avgCellDiameter\"] / 35.0\n\n detections_1 = self.derive_detections(label_image_1)\n\n detections_2 = self.derive_detections(label_image_2)\n\n # Calculate tracking based on cell features and position.\n traces = self.find_initials_traces(detections_1, detections_2)\n\n # Use neighbourhoods to improve tracking.\n for _ in range(int(self.parameters_tracking[\"iterations\"])):\n traces = self.improve_traces(detections_1, detections_2, traces)\n\n # Filter traces.\n return [(trace.previous_cell.number, trace.current_cell.number) for trace in traces]"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef is_cell_big(self, cell_detection):\n return cell_detection.area > self.parameters_tracking[\"big_size\"] * self.scale * self.scale", "response": "Check if the cell is considered big."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nfind k closest neighbours of the given cell.", "response": "def find_closest_neighbours(cell, all_cells, k, max_dist):\n \"\"\"\n Find k closest neighbours of the given cell.\n :param CellFeatures cell: cell of interest\n :param all_cells: cell to consider as neighbours\n :param int k: number of neighbours to be returned\n :param int max_dist: maximal distance in pixels to consider neighbours\n :return: k closest neighbours\n \"\"\"\n all_cells = [c for c in all_cells if c != cell]\n\n sorted_cells = sorted([(cell.distance(c), c) for c in all_cells])\n\n return [sc[1] for sc in sorted_cells[:k] if sc[0] <= max_dist]"} {"SOURCE": 
"codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef calculate_basic_cost(self, d1, d2):\n distance = euclidean_dist(d1.center, d2.center) / self.scale\n\n area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)\n\n return distance + self.parameters_cost_initial[\"area_weight\"] * area_change", "response": "Calculates the basic cost between two cells."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncalculating the cost between two cells taking into account the movement of cells neighbours.", "response": "def calculate_localised_cost(self, d1, d2, neighbours, motions):\n \"\"\"\n Calculates assignment cost between two cells taking into account the movement of cells neighbours.\n\n :param CellFeatures d1: detection in first frame\n :param CellFeatures d2: detection in second frame\n \"\"\"\n my_nbrs_with_motion = [n for n in neighbours[d1] if n in motions]\n\n my_motion = (d1.center[0] - d2.center[0], d1.center[1] - d2.center[1])\n\n if my_nbrs_with_motion == []:\n distance = euclidean_dist(d1.center, d2.center) / self.scale\n else:\n # it is not in motions if there is no trace (cell is considered to vanish)\n distance = min([euclidean_dist(my_motion, motions[n]) for n in my_nbrs_with_motion]) / self.scale\n\n area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)\n\n return distance + self.parameters_cost_iteration[\"area_weight\"] * area_change"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncalculates assignment costs between two detections and empty spaces. The smaller cost the better.", "response": "def calculate_costs(self, detections_1, detections_2, calculate_match_cost, params):\n \"\"\"\n Calculates assignment costs between detections and 'empty' spaces. 
The smaller cost the better.\n\n @param detections_1: cell list of size n in previous frame\n @param detections_2: cell list of size m in current frame\n\n @return: cost matrix (n+m)x(n+m) extended by cost of matching cells with emptiness\n \"\"\"\n global invalid_match\n\n size_sum = len(detections_1) + len(detections_2)\n\n # Cost matrix extended by matching cells with nothing\n # (for detection 1 it means losing cells, for detection 2 it means new cells).\n cost_matrix = numpy.zeros((size_sum, size_sum))\n\n # lost cells cost\n cost_matrix[0:len(detections_1), len(detections_2):size_sum] = params[\"default_empty_cost\"] + (1 - numpy.eye(\n len(detections_1), len(detections_1))) * invalid_match\n\n # new cells cost\n cost_matrix[len(detections_1):size_sum, 0:len(detections_2)] = params[\"default_empty_cost\"] + (1 - numpy.eye(\n len(detections_2), len(detections_2))) * invalid_match\n\n # increase costs for reliable detections\n for row in [i for i in range(0, len(detections_1)) if detections_1[i].is_reliable() and (\n not params[\"check_if_big\"] or self.is_cell_big(detections_1[i]))]:\n cost_matrix[row, len(detections_2):size_sum] *= params[\"default_empty_reliable_cost_mult\"]\n\n for col in [i for i in range(0, len(detections_2)) if detections_2[i].is_reliable() and (\n not params[\"check_if_big\"] or self.is_cell_big(detections_2[i]))]:\n cost_matrix[len(detections_1):size_sum, col] *= params[\"default_empty_reliable_cost_mult\"]\n\n # calculate cost of matching cells\n def cost_if_not_too_far(detection_1, detection_2):\n if detection_1.distance(detection_2) <= self.parameters_tracking[\"max_distance\"]:\n return calculate_match_cost(detection_1, detection_2)\n else:\n return invalid_match\n\n cost_matrix[0:len(detections_1), 0:len(detections_2)] = [[cost_if_not_too_far(d1, d2) for d2 in detections_2] for d1 in detections_1]\n\n return cost_matrix"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the 
documentation\ndef solve_assignement(self, costs):\n if costs is None or len(costs) == 0:\n return dict()\n\n n = costs.shape[0]\n\n pairs = [(i, j) for i in range(0, n) for j in range(0, n) if costs[i, j] < invalid_match]\n\n costs_list = [costs[i, j] for (i, j) in pairs]\n\n assignment = lapjv.lapjv(list(zip(*pairs))[0], list(zip(*pairs))[1], costs_list)\n\n indexes = enumerate(list(assignment[0]))\n\n return dict([(row, col) for row, col in indexes])", "response": "Solves assignment problem using Hungarian implementation by Brian M. Clapper."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nsmoothing the image with a per - pixel random multiplier", "response": "def smooth_with_noise(image, bits):\n \"\"\"Smooth the image with a per-pixel random multiplier\n \n image - the image to perturb\n bits - the noise is this many bits below the pixel value\n \n The noise is random with normal distribution, so the individual pixels\n get either multiplied or divided by a normally distributed # of bits\n \"\"\"\n \n rr = np.random.RandomState()\n rr.seed(0)\n r = rr.normal(size=image.shape)\n delta = pow(2.0,-bits)\n image_copy = np.clip(image, delta, 1)\n result = np.exp2(np.log2(image_copy + delta) * r + \n (1-r) * np.log2(image_copy))\n result[result>1] = 1\n result[result<0] = 0\n return result"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef smooth_with_function_and_mask(image, function, mask):\n not_mask = np.logical_not(mask)\n bleed_over = function(mask.astype(float))\n masked_image = np.zeros(image.shape, image.dtype)\n masked_image[mask] = image[mask]\n smoothed_image = function(masked_image)\n output_image = smoothed_image / (bleed_over + np.finfo(float).eps)\n return output_image", "response": "Smooth an image with a linear function ignoring the contribution of masked pixels and mask the image"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a 
function in Python 3 that\ncreates a 2 - d Gaussian convolution kernel", "response": "def circular_gaussian_kernel(sd,radius):\n \"\"\"Create a 2-d Gaussian convolution kernel\n \n sd - standard deviation of the gaussian in pixels\n radius - build a circular kernel that convolves all points in the circle\n bounded by this radius \n \"\"\"\n i,j = np.mgrid[-radius:radius+1,-radius:radius+1].astype(float) / radius\n mask = i**2 + j**2 <= 1\n i = i * radius / sd\n j = j * radius / sd\n\n kernel = np.zeros((2*radius+1,2*radius+1))\n kernel[mask] = np.e ** (-(i[mask]**2+j[mask]**2) /\n (2 * sd **2))\n #\n # Normalize the kernel so that there is no net effect on a uniform image\n #\n kernel = kernel / np.sum(kernel)\n return kernel"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef fit_polynomial(pixel_data, mask, clip=True):\n '''Return an \"image\" which is a polynomial fit to the pixel data\n \n Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F\n \n pixel_data - a two-dimensional numpy array to be fitted\n \n mask - a mask of pixels whose intensities should be considered in the\n least squares fit\n \n clip - if True, clip the output array so that pixels less than zero\n in the fitted image are zero and pixels that are greater than\n one are one.\n '''\n mask = np.logical_and(mask,pixel_data > 0)\n if not np.any(mask):\n return pixel_data\n x,y = np.mgrid[0:pixel_data.shape[0],0:pixel_data.shape[1]]\n x2 = x*x\n y2 = y*y\n xy = x*y\n o = np.ones(pixel_data.shape)\n a = np.array([x[mask],y[mask],x2[mask],y2[mask],xy[mask],o[mask]])\n coeffs = scipy.linalg.lstsq(a.transpose(),pixel_data[mask])[0]\n output_pixels = np.sum([coeff * index for coeff, index in\n zip(coeffs, [x,y,x2,y2,xy,o])],0)\n if clip:\n output_pixels[output_pixels > 1] = 1\n output_pixels[output_pixels < 0] = 0\n return output_pixels", "response": "Fit the image to the polynomial Ax^2 + By ** 2 + Cxy + Dx + Ey + F\n \n "} 
{"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_threshold(threshold_method, threshold_modifier, image, \n mask=None, labels = None,\n threshold_range_min = None, threshold_range_max = None,\n threshold_correction_factor = 1.0,\n adaptive_window_size = 10, **kwargs):\n \"\"\"Compute a threshold for an image\n \n threshold_method - one of the TM_ methods above\n threshold_modifier - TM_GLOBAL to calculate one threshold over entire image\n TM_ADAPTIVE to calculate a per-pixel threshold\n TM_PER_OBJECT to calculate a different threshold for\n each object\n image - a NxM numpy array of the image data\n\n Returns a tuple of local_threshold and global_threshold where:\n * global_threshold is the single number calculated using the threshold\n method over the whole image\n * local_threshold is the global_threshold for global methods. For adaptive\n and per-object thresholding, local_threshold is a matrix of threshold\n values representing the threshold to be applied at each pixel of the\n image.\n \n Different methods have optional and required parameters:\n Required:\n TM_PER_OBJECT:\n labels - a labels matrix that defines the extents of the individual objects\n to be thresholded separately.\n \n Optional:\n All:\n mask - a mask of the significant pixels in the image\n threshold_range_min, threshold_range_max - constrain the threshold\n values to be examined to values between these limits\n threshold_correction_factor - the calculated threshold is multiplied\n by this number to get the final threshold\n TM_MOG (mixture of Gaussians):\n object_fraction - fraction of image expected to be occupied by objects\n (pixels that are above the threshold)\n TM_OTSU - We have algorithms derived from Otsu. There is a three-class\n version of Otsu in addition to the two class. 
There is also\n an entropy measure in addition to the weighted variance.\n two_class_otsu - assume that the distribution represents\n two intensity classes if true, three if false.\n use_weighted_variance - use Otsu's weighted variance if true,\n an entropy measure if false\n assign_middle_to_foreground - assign pixels in the middle class\n in a three-class Otsu to the foreground if true\n or the background if false.\n \"\"\"\n global_threshold = get_global_threshold(\n threshold_method, image, mask, **kwargs)\n global_threshold *= threshold_correction_factor\n if not threshold_range_min is None:\n global_threshold = max(global_threshold, threshold_range_min)\n if not threshold_range_max is None:\n global_threshold = min(global_threshold, threshold_range_max)\n if threshold_modifier == TM_GLOBAL:\n local_threshold=global_threshold\n elif threshold_modifier == TM_ADAPTIVE:\n local_threshold = get_adaptive_threshold(\n threshold_method, image, global_threshold,\n mask, adaptive_window_size = adaptive_window_size, **kwargs)\n local_threshold = local_threshold * threshold_correction_factor\n elif threshold_modifier == TM_PER_OBJECT:\n local_threshold = get_per_object_threshold(\n threshold_method, image, global_threshold, mask, labels,\n threshold_range_min, threshold_range_max,**kwargs)\n local_threshold = local_threshold * threshold_correction_factor\n else:\n raise NotImplementedError(\"%s thresholding is not implemented\"%(threshold_modifier))\n if isinstance(local_threshold, np.ndarray):\n #\n # Constrain thresholds to within .7 to 1.5 of the global threshold.\n #\n threshold_range_min = max(threshold_range_min, global_threshold * .7)\n threshold_range_max = min(threshold_range_max, global_threshold * 1.5)\n if not threshold_range_min is None:\n local_threshold[local_threshold < threshold_range_min] = \\\n threshold_range_min\n if not threshold_range_max is None:\n local_threshold[local_threshold > threshold_range_max] = \\\n threshold_range_max\n if 
(threshold_modifier == TM_PER_OBJECT) and (labels is not None):\n local_threshold[labels == 0] = 1.0\n else:\n if not threshold_range_min is None:\n local_threshold = max(local_threshold, threshold_range_min)\n if not threshold_range_max is None:\n local_threshold = min(local_threshold, threshold_range_max)\n return local_threshold, global_threshold", "response": "Compute a threshold for an image."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncomputes a single threshold over the whole image", "response": "def get_global_threshold(threshold_method, image, mask = None, **kwargs):\n \"\"\"Compute a single threshold over the whole image\"\"\"\n if mask is not None and not np.any(mask):\n return 1\n \n if threshold_method == TM_OTSU:\n fn = get_otsu_threshold\n elif threshold_method == TM_MOG:\n fn = get_mog_threshold\n elif threshold_method == TM_BACKGROUND:\n fn = get_background_threshold\n elif threshold_method == TM_ROBUST_BACKGROUND:\n fn = get_robust_background_threshold\n elif threshold_method == TM_RIDLER_CALVARD:\n fn = get_ridler_calvard_threshold\n elif threshold_method == TM_KAPUR:\n fn = get_kapur_threshold\n elif threshold_method == TM_MCT:\n fn = get_maximum_correlation_threshold\n else:\n raise NotImplementedError(\"%s algorithm not implemented\"%(threshold_method))\n kwargs = dict([(k, v) for k, v in kwargs.items()\n if k in fn.args])\n return fn(image, mask, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ngive an image and a threshold compute a global threshold per pixel.", "response": "def get_adaptive_threshold(threshold_method, image, threshold,\n mask = None,\n adaptive_window_size = 10,\n **kwargs):\n \n \"\"\"Given a global threshold, compute a threshold per pixel\n \n Break the image into blocks, computing the threshold per block.\n Afterwards, constrain the block threshold to .7 T < t < 1.5 T.\n \n Block sizes must be at least 50x50. 
Images > 500 x 500 get 10x10\n blocks.\n \"\"\"\n # for the X and Y direction, find the # of blocks, given the\n # size constraints\n image_size = np.array(image.shape[:2],dtype=int)\n nblocks = image_size // adaptive_window_size\n #\n # Use a floating point block size to apportion the roundoff\n # roughly equally to each block\n #\n increment = ( np.array(image_size,dtype=float) / \n np.array(nblocks,dtype=float))\n #\n # Put the answer here\n #\n thresh_out = np.zeros(image_size, image.dtype)\n #\n # Loop once per block, computing the \"global\" threshold within the\n # block.\n #\n block_threshold = np.zeros([nblocks[0],nblocks[1]])\n for i in range(nblocks[0]):\n i0 = int(i*increment[0])\n i1 = int((i+1)*increment[0])\n for j in range(nblocks[1]):\n j0 = int(j*increment[1])\n j1 = int((j+1)*increment[1])\n block = image[i0:i1,j0:j1]\n block_mask = None if mask is None else mask[i0:i1,j0:j1]\n block_threshold[i,j] = get_global_threshold(\n threshold_method, \n block, mask = block_mask,\n **kwargs)\n #\n # Use a cubic spline to blend the thresholds across the image to avoid image artifacts\n #\n spline_order = min(3, np.min(nblocks) - 1)\n xStart = int(increment[0] / 2)\n xEnd = int((nblocks[0] - 0.5) * increment[0])\n yStart = int(increment[1] / 2)\n yEnd = int((nblocks[1] - 0.5) * increment[1])\n xtStart = .5\n xtEnd = image.shape[0] - .5\n ytStart = .5\n ytEnd = image.shape[1] - .5\n block_x_coords = np.linspace(xStart,xEnd, nblocks[0])\n block_y_coords = np.linspace(yStart,yEnd, nblocks[1])\n adaptive_interpolation = scipy.interpolate.RectBivariateSpline(\n block_x_coords, block_y_coords, block_threshold,\n bbox = (xtStart, xtEnd, ytStart, ytEnd),\n kx = spline_order, ky = spline_order)\n thresh_out_x_coords = np.linspace(.5, int(nblocks[0] * increment[0]) - .5, thresh_out.shape[0])\n thresh_out_y_coords = np.linspace(.5, int(nblocks[1] * increment[1]) - .5 , thresh_out.shape[1])\n\n thresh_out = adaptive_interpolation(thresh_out_x_coords, 
thresh_out_y_coords)\n \n return thresh_out"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncalculates the threshold per object for the specified method on the image.", "response": "def get_per_object_threshold(method, image, threshold, mask=None, labels=None,\n threshold_range_min = None,\n threshold_range_max = None,\n **kwargs):\n \"\"\"Return a matrix giving threshold per pixel calculated per-object\n \n image - image to be thresholded\n mask - mask out \"don't care\" pixels\n labels - a label mask indicating object boundaries\n threshold - the global threshold\n \"\"\"\n if labels is None:\n labels = np.ones(image.shape,int)\n if not mask is None:\n labels[np.logical_not(mask)] = 0 \n label_extents = scipy.ndimage.find_objects(labels,np.max(labels))\n local_threshold = np.ones(image.shape,image.dtype)\n for i, extent in enumerate(label_extents, start=1):\n label_mask = labels[extent]==i\n if not mask is None:\n label_mask = np.logical_and(mask[extent], label_mask)\n values = image[extent]\n per_object_threshold = get_global_threshold(\n method, values, mask = label_mask, **kwargs)\n local_threshold[extent][label_mask] = per_object_threshold\n return local_threshold"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_mog_threshold(image, mask=None, object_fraction = 0.2):\n cropped_image = np.array(image.flat) if mask is None else image[mask]\n pixel_count = np.product(cropped_image.shape)\n max_count = 512**2 # maximum # of pixels analyzed\n #\n # We need at least 3 pixels to keep from crashing because the highest \n # and lowest are chopped out below.\n #\n object_fraction = float(object_fraction)\n background_fraction = 1.0-object_fraction\n if pixel_count < 3/min(object_fraction,background_fraction):\n return 1\n if np.max(cropped_image)==np.min(cropped_image):\n return cropped_image[0]\n number_of_classes = 3\n if pixel_count > max_count:\n 
np.random.seed(0)\n pixel_indices = np.random.permutation(pixel_count)[:max_count]\n cropped_image = cropped_image[pixel_indices]\n # Initialize mean and standard deviations of the three Gaussian\n # distributions by looking at the pixel intensities in the original\n # image and by considering the percentage of the image that is\n # covered by object pixels. Class 1 is the background class and Class\n # 3 is the object class. Class 2 is an intermediate class and we will\n # decide later if it encodes background or object pixels. Also, for\n # robustness the we remove 1% of the smallest and highest intensities\n # in case there are any quantization effects that have resulted in\n # unnaturally many 0:s or 1:s in the image.\n cropped_image.sort()\n one_percent = (np.product(cropped_image.shape) + 99)//100\n cropped_image=cropped_image[one_percent:-one_percent]\n pixel_count = np.product(cropped_image.shape)\n # Guess at the class means for the 3 classes: background,\n # in-between and object\n bg_pixel = cropped_image[int(round(pixel_count * background_fraction/2.0))]\n fg_pixel = cropped_image[int(round(pixel_count * (1-object_fraction/2)))]\n class_mean = np.array([bg_pixel, (bg_pixel+fg_pixel)/2,fg_pixel])\n class_std = np.ones((3,)) * 0.15\n # Initialize prior probabilities of a pixel belonging to each class.\n # The intermediate class steals some probability from the background\n # and object classes.\n class_prob = np.array([3.0/4.0 * background_fraction ,\n 1.0/4.0,\n 3.0/4.0 * object_fraction])\n # Expectation-Maximization algorithm for fitting the three Gaussian\n # distributions/classes to the data. Note, the code below is general\n # and works for any number of classes. 
Iterate until parameters don't\n # change anymore.\n class_count = np.prod(class_mean.shape)\n #\n # Do a coarse iteration on subsampled data and a fine iteration on the real\n # data\n #\n r = np.random.RandomState()\n r.seed(np.frombuffer(cropped_image[:100].data, np.uint8).tolist())\n for data in (\n r.permutation(cropped_image)[0:(len(cropped_image) // 10)],\n cropped_image):\n delta = 1\n pixel_count = len(data)\n while delta > 0.001:\n old_class_mean = class_mean.copy()\n # Update probabilities of a pixel belonging to the background or\n # object1 or object2\n pixel_class_prob = np.ndarray((pixel_count,class_count))\n for k in range(class_count):\n norm = scipy.stats.norm(class_mean[k],class_std[k])\n pixel_class_prob[:,k] = class_prob[k] * norm.pdf(data)\n pixel_class_normalizer = np.sum(pixel_class_prob,1)+.000000000001\n for k in range(class_count):\n pixel_class_prob[:,k] = pixel_class_prob[:,k] / pixel_class_normalizer\n # Update parameters in Gaussian distributions\n class_prob[k] = np.mean(pixel_class_prob[:,k])\n class_mean[k] = (np.sum(pixel_class_prob[:,k] * data) /\n (class_prob[k] * pixel_count))\n class_std[k] = \\\n math.sqrt(np.sum(pixel_class_prob[:,k] * \n (data-class_mean[k])**2)/\n (pixel_count * class_prob[k])) + .000001\n delta = np.sum(np.abs(old_class_mean - class_mean))\n # Now the Gaussian distributions are fitted and we can describe the\n # histogram of the pixel intensities as the sum of these Gaussian\n # distributions. To find a threshold we first have to decide if the\n # intermediate class 2 encodes background or object pixels. 
This is\n # done by choosing the combination of class probabilities \"class_prob\"\n # that best matches the user input \"object_fraction\".\n \n # Construct an equally spaced array of values between the background\n # and object mean\n ndivisions = 10000\n level = (np.arange(ndivisions) *\n ((class_mean[2]-class_mean[0]) / ndivisions)\n + class_mean[0])\n class_gaussian = np.ndarray((ndivisions,class_count))\n for k in range(class_count):\n norm = scipy.stats.norm(class_mean[k],class_std[k])\n class_gaussian[:,k] = class_prob[k] * norm.pdf(level)\n if (abs(class_prob[1]+class_prob[2]-object_fraction) <\n abs(class_prob[2]-object_fraction)):\n # classifying the intermediate as object more closely models\n # the user's desired object fraction\n background_distribution = class_gaussian[:,0]\n object_distribution = class_gaussian[:,1]+class_gaussian[:,2]\n else:\n background_distribution = class_gaussian[:,0]+class_gaussian[:,1]\n object_distribution = class_gaussian[:,2]\n # Now, find the threshold at the intersection of the background\n # distribution and the object distribution.\n index = np.argmin(np.abs(background_distribution - object_distribution))\n return level[index]", "response": "Compute a suitable background using a mixture of gaussians\n \n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nget the threshold based on the mode of the image.", "response": "def get_background_threshold(image, mask = None):\n \"\"\"Get threshold based on the mode of the image\n The threshold is calculated by calculating the mode and multiplying by\n 2 (an arbitrary empirical factor). 
The user will presumably adjust the\n multiplication factor as needed.\"\"\"\n cropped_image = np.array(image.flat) if mask is None else image[mask]\n if np.product(cropped_image.shape)==0:\n return 0\n img_min = np.min(cropped_image)\n img_max = np.max(cropped_image)\n if img_min == img_max:\n return cropped_image[0]\n \n # Only do the histogram between values a bit removed from saturation\n robust_min = 0.02 * (img_max - img_min) + img_min\n robust_max = 0.98 * (img_max - img_min) + img_min\n nbins = 256\n cropped_image = cropped_image[np.logical_and(cropped_image > robust_min,\n cropped_image < robust_max)]\n if len(cropped_image) == 0:\n return robust_min\n \n h = scipy.ndimage.histogram(cropped_image, robust_min, robust_max, nbins)\n index = np.argmax(h)\n cutoff = float(index) / float(nbins-1)\n #\n # If we have a low (or almost no) background, the cutoff will be\n # zero since the background falls into the lowest bin. We want to\n # offset by the robust cutoff factor of .02. We rescale by 1.04\n # to account for the 0.02 at the top and bottom.\n #\n cutoff = (cutoff + 0.02) / 1.04\n return img_min + cutoff * 2 * (img_max - img_min)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_robust_background_threshold(image, \n mask = None,\n lower_outlier_fraction = 0.05,\n upper_outlier_fraction = 0.05,\n deviations_above_average = 2.0,\n average_fn = np.mean,\n variance_fn = np.std):\n \"\"\"Calculate threshold based on mean & standard deviation\n The threshold is calculated by trimming the top and bottom 5% of\n pixels off the image, then calculating the mean and standard deviation\n of the remaining image. 
The threshold is then set at 2 (empirical\n value) standard deviations above the mean.\n\n image - the image to threshold\n mask - mask of pixels to consider (default = all pixels)\n lower_outlier_fraction - after ordering the pixels by intensity, remove\n the pixels from 0 to len(image) * lower_outlier_fraction from\n the threshold calculation (default = .05).\n upper_outlier_fraction - remove the pixels from \n len(image) * (1 - upper_outlier_fraction) to len(image) from \n consideration (default = .05).\n deviations_above_average - calculate the standard deviation or MAD and\n multiply by this number and add to the average to get the final \n threshold (default = 2)\n average_fn - function used to calculate the average intensity (e.g.\n np.mean, np.median or some sort of mode function). Default = np.mean\n variance_fn - function used to calculate the amount of variance.\n Default = np.sd\n \"\"\" \n\n cropped_image = np.array(image.flat) if mask is None else image[mask]\n n_pixels = np.product(cropped_image.shape)\n if n_pixels<3:\n return 0\n \n cropped_image.sort()\n if cropped_image[0] == cropped_image[-1]:\n return cropped_image[0]\n low_chop = int(round(n_pixels * lower_outlier_fraction))\n hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))\n im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]\n mean = average_fn(im)\n sd = variance_fn(im)\n return mean+sd*deviations_above_average", "response": "Calculate the robust background threshold for a given image."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef mad(a):\n '''Calculate the median absolute deviation of a sample\n \n a - a numpy array-like collection of values\n \n returns the median of the deviation of a from its median.\n '''\n a = np.asfarray(a).flatten()\n return np.median(np.abs(a - np.median(a)))", "response": "Calculate the median absolute deviation of a sample\n \n returns the median of the 
deviation of a from its median."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef binned_mode(a):\n '''Calculate a binned mode of a sample\n \n a - array of values\n \n This routine bins the sample into np.sqrt(len(a)) bins. This is a\n number that is a compromise between fineness of measurement and\n the stochastic nature of counting which roughly scales as the\n square root of the sample size.\n '''\n a = np.asarray(a).flatten()\n a_min = np.min(a)\n a_max = np.max(a)\n n_bins = np.ceil(np.sqrt(len(a)))\n b = ((a - a_min) / (a_max - a_min) * n_bins).astype(int)\n idx = np.argmax(np.bincount(b))\n return np.percentile(a, 100 * float(idx+.5) / n_bins)", "response": "Calculate a binned mode of a sample\n \n "} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nfinds a threshold using the method of Ridler and Calvard.", "response": "def get_ridler_calvard_threshold(image, mask = None):\n \"\"\"Find a threshold using the method of Ridler and Calvard\n \n The reference for this method is:\n \"Picture Thresholding Using an Iterative Selection Method\" \n by T. Ridler and S. Calvard, in IEEE Transactions on Systems, Man and\n Cybernetics, vol. 8, no. 8, August 1978.\n \"\"\"\n cropped_image = np.array(image.flat) if mask is None else image[mask]\n if np.product(cropped_image.shape)<3:\n return 0\n if np.min(cropped_image) == np.max(cropped_image):\n return cropped_image[0]\n \n # We want to limit the dynamic range of the image to 256. Otherwise,\n # an image with almost all values near zero can give a bad result.\n min_val = np.max(cropped_image)/256;\n cropped_image[cropped_image < min_val] = min_val;\n im = np.log(cropped_image);\n min_val = np.min(im);\n max_val = np.max(im);\n im = (im - min_val)/(max_val - min_val);\n pre_thresh = 0;\n # This method needs an initial value to start iterating. 
Using\n # graythresh (Otsu's method) is probably not the best, because the\n # Ridler Calvard threshold ends up being too close to this one and in\n # most cases has the same exact value.\n new_thresh = otsu(im)\n delta = 0.00001;\n while abs(pre_thresh - new_thresh)>delta:\n pre_thresh = new_thresh;\n mean1 = np.mean(im[im=pre_thresh]);\n new_thresh = np.mean([mean1,mean2]);\n return math.exp(min_val + (max_val-min_val)*new_thresh);"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_kapur_threshold(image, mask=None):\n cropped_image = np.array(image.flat) if mask is None else image[mask]\n if np.product(cropped_image.shape)<3:\n return 0\n if np.min(cropped_image) == np.max(cropped_image):\n return cropped_image[0]\n log_image = np.log2(smooth_with_noise(cropped_image, 8))\n min_log_image = np.min(log_image)\n max_log_image = np.max(log_image)\n histogram = scipy.ndimage.histogram(log_image,\n min_log_image,\n max_log_image,\n 256)\n histogram_values = (min_log_image + (max_log_image - min_log_image)*\n np.arange(256, dtype=float) / 255)\n # drop any zero bins\n keep = histogram != 0\n histogram = histogram[keep]\n histogram_values = histogram_values[keep]\n # check for corner cases\n if np.product(histogram_values)==1:\n return 2**histogram_values[0] \n # Normalize to probabilities\n p = histogram.astype(float) / float(np.sum(histogram))\n # Find the probabilities totals up to and above each possible threshold.\n lo_sum = np.cumsum(p);\n hi_sum = lo_sum[-1] - lo_sum;\n lo_e = np.cumsum(p * np.log2(p));\n hi_e = lo_e[-1] - lo_e;\n\n # compute the entropies\n lo_entropy = lo_e / lo_sum - np.log2(lo_sum);\n hi_entropy = hi_e / hi_sum - np.log2(hi_sum);\n\n sum_entropy = lo_entropy[:-1] + hi_entropy[:-1];\n sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf\n entry = np.argmin(sum_entropy);\n return 2**((histogram_values[entry] + histogram_values[entry+1]) / 2);", "response": "The Kapur Sahoo 
& Wong method of thresholding adapted to log - space."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_maximum_correlation_threshold(image, mask = None, bins = 256):\n '''Return the maximum correlation threshold of the image\n \n image - image to be thresholded\n \n mask - mask of relevant pixels\n \n bins - # of value bins to use\n \n This is an implementation of the maximum correlation threshold as\n described in Padmanabhan, \"A novel algorithm for optimal image thresholding\n of biological data\", Journal of Neuroscience Methods 193 (2010) p 380-384\n '''\n \n if mask is not None:\n image = image[mask]\n image = image.ravel()\n nm = len(image)\n if nm == 0:\n return 0\n \n #\n # Bin the image\n #\n min_value = np.min(image)\n max_value = np.max(image)\n if min_value == max_value:\n return min_value\n image = ((image - min_value) * (bins - 1) / \n (max_value - min_value)).astype(int)\n histogram = np.bincount(image)\n #\n # Compute (j - mean) and (j - mean) **2\n mean_value = np.mean(image)\n diff = np.arange(len(histogram)) - mean_value\n diff2 = diff * diff\n ndiff = histogram * diff\n ndiff2 = histogram * diff2\n #\n # This is the sum over all j of (j-mean)**2. 
It's a constant that could\n # be factored out, but I follow the method and use it anyway.\n #\n sndiff2 = np.sum(ndiff2) \n #\n # Compute the cumulative sum from i to m which is the cumsum at m\n # minus the cumsum at i-1\n cndiff = np.cumsum(ndiff)\n numerator = np.hstack([[cndiff[-1]], cndiff[-1] - cndiff[:-1]])\n #\n # For the bottom, we need (Nm - Ni) * Ni / Nm\n #\n ni = nm - np.hstack([[0], np.cumsum(histogram[:-1])]) # number of pixels above i-1\n denominator = np.sqrt(sndiff2 * (nm - ni) * ni / nm)\n #\n mct = numerator / denominator\n mct[denominator == 0] = 0\n my_bin = np.argmax(mct)-1\n return min_value + my_bin * (max_value - min_value) / (bins - 1)", "response": "Return the maximum correlation threshold of the image"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncomputing the log - transformed variance of foreground and background images.", "response": "def weighted_variance(image, mask, binary_image):\n \"\"\"Compute the log-transformed variance of foreground and background\n \n image - intensity image used for thresholding\n \n mask - mask of ignored pixels\n \n binary_image - binary image marking foreground and background\n \"\"\"\n if not np.any(mask):\n return 0\n #\n # Clamp the dynamic range of the foreground\n #\n minval = np.max(image[mask])/256\n if minval == 0:\n return 0\n \n fg = np.log2(np.maximum(image[binary_image & mask], minval))\n bg = np.log2(np.maximum(image[(~ binary_image) & mask], minval))\n nfg = np.product(fg.shape)\n nbg = np.product(bg.shape)\n if nfg == 0:\n return np.var(bg)\n elif nbg == 0:\n return np.var(fg)\n else:\n return (np.var(fg) * nfg + np.var(bg)*nbg) / (nfg+nbg)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef sum_of_entropies(image, mask, binary_image):\n mask=mask.copy()\n mask[np.isnan(image)] = False\n if not np.any(mask):\n return 0\n #\n # Clamp the dynamic range of the foreground\n #\n minval = 
np.max(image[mask])/256\n if minval == 0:\n return 0\n clamped_image = image.copy()\n clamped_image[clamped_image < minval] = minval\n #\n # Smooth image with -8 bits of noise\n #\n image = smooth_with_noise(clamped_image, 8)\n im_min = np.min(image)\n im_max = np.max(image)\n #\n # Figure out the bounds for the histogram\n #\n upper = np.log2(im_max)\n lower = np.log2(im_min)\n if upper == lower:\n # All values are the same, answer is log2 of # of pixels\n return math.log(np.sum(mask),2) \n #\n # Create log-transformed lists of points in the foreground and background\n # \n fg = image[binary_image & mask]\n bg = image[(~ binary_image) & mask]\n if len(fg) == 0 or len(bg) == 0:\n return 0\n log_fg = np.log2(fg)\n log_bg = np.log2(bg)\n #\n # Make these into histograms\n hfg = numpy_histogram(log_fg, 256, range=(lower,upper))[0]\n hbg = numpy_histogram(log_bg, 256, range=(lower,upper))[0]\n #hfg = scipy.ndimage.histogram(log_fg,lower,upper,256)\n #hbg = scipy.ndimage.histogram(log_bg,lower,upper,256)\n #\n # Drop empty bins\n #\n hfg = hfg[hfg>0]\n hbg = hbg[hbg>0]\n if np.product(hfg.shape) == 0:\n hfg = np.ones((1,),int)\n if np.product(hbg.shape) == 0:\n hbg = np.ones((1,),int)\n #\n # Normalize\n #\n hfg = hfg.astype(float) / float(np.sum(hfg))\n hbg = hbg.astype(float) / float(np.sum(hbg))\n #\n # Compute sum of entropies\n #\n return np.sum(hfg * np.log2(hfg)) + np.sum(hbg*np.log2(hbg))", "response": "Compute the entropy of the entropies of the image."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef log_transform(image):\n '''Renormalize image intensities to log space\n \n Returns a tuple of transformed image and a dictionary to be passed into\n inverse_log_transform. 
The minimum and maximum from the dictionary\n can be applied to an image by the inverse_log_transform to \n convert it back to its former intensity values.\n '''\n orig_min, orig_max = scipy.ndimage.extrema(image)[:2]\n #\n # We add 1/2 bit noise to an 8 bit image to give the log a bottom\n #\n limage = image.copy()\n noise_min = orig_min + (orig_max-orig_min)/256.0+np.finfo(image.dtype).eps\n limage[limage < noise_min] = noise_min\n d = { \"noise_min\":noise_min}\n limage = np.log(limage)\n log_min, log_max = scipy.ndimage.extrema(limage)[:2]\n d[\"log_min\"] = log_min\n d[\"log_max\"] = log_max\n return stretch(limage), d", "response": "Renormalize image intensities to log space\n \n Returns a tuple of transformed image and a dictionary to be passed into inverse_log_transform."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nrank the image in order of the corresponding pixel in the image.", "response": "def rank_order(image, nbins=None):\n \"\"\"Return an image of the same shape where each pixel has the\n rank-order value of the corresponding pixel in the image.\n The returned image's elements are of type np.uint32 which\n simplifies processing in C code.\n \"\"\"\n flat_image = image.ravel()\n sort_order = flat_image.argsort().astype(np.uint32)\n flat_image = flat_image[sort_order]\n sort_rank = np.zeros_like(sort_order)\n is_different = flat_image[:-1] != flat_image[1:]\n np.cumsum(is_different, out=sort_rank[1:])\n original_values = np.zeros((sort_rank[-1]+1,),image.dtype)\n original_values[0] = flat_image[0]\n original_values[1:] = flat_image[1:][is_different] \n int_image = np.zeros_like(sort_order)\n int_image[sort_order] = sort_rank\n if nbins is not None:\n max_ranked_data = np.max(int_image)\n while max_ranked_data >= nbins:\n #\n # Decimate the bins until there are fewer than nbins\n #\n hist = np.bincount(int_image)\n #\n # Rank the bins from lowest count to highest\n order = np.argsort(hist)\n #\n # find enough to maybe 
decimate to nbins\n #\n candidates = order[:max_ranked_data+2-nbins]\n to_delete = np.zeros(max_ranked_data+2, bool)\n to_delete[candidates] = True\n #\n # Choose candidates that are either not next to others\n # or have an even index so as not to delete adjacent bins\n #\n td_mask = to_delete[:-1] & (\n ((np.arange(max_ranked_data+1) & 2) == 0) |\n (~ to_delete[1:]))\n if td_mask[0]:\n td_mask[0] = False\n #\n # A value to be deleted has the same index as the following\n # value and the two end up being merged\n #\n rd_translation = np.cumsum(~td_mask)-1\n #\n # Translate the rankings to the new space\n #\n int_image = rd_translation[int_image]\n #\n # Eliminate the bins with low counts\n #\n original_values = original_values[~td_mask]\n max_ranked_data = len(original_values)-1\n \n return (int_image.reshape(image.shape), original_values)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nnormalize the intensities of each object to the [ 0 1 ) range.", "response": "def normalized_per_object(image, labels):\n \"\"\"Normalize the intensities of each object to the [0, 1] range.\"\"\"\n nobjects = labels.max()\n objects = np.arange(nobjects + 1)\n lmin, lmax = scind.extrema(image, labels, objects)[:2]\n # Divisor is the object's max - min, or 1 if they are the same.\n divisor = np.ones((nobjects + 1,))\n divisor[lmax > lmin] = (lmax - lmin)[lmax > lmin]\n return (image - lmin[labels]) / divisor[labels]"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef cooccurrence(quantized_image, labels, scale_i=3, scale_j=0):\n labels = labels.astype(int)\n nlevels = quantized_image.max() + 1\n nobjects = labels.max()\n if scale_i < 0:\n scale_i = -scale_i\n scale_j = -scale_j\n if scale_i == 0 and scale_j > 0:\n image_a = quantized_image[:, :-scale_j]\n image_b = quantized_image[:, scale_j:]\n labels_ab = labels_a = labels[:, :-scale_j]\n labels_b = labels[:, scale_j:]\n elif scale_i > 0 and scale_j == 0:\n 
image_a = quantized_image[:-scale_i, :]\n image_b = quantized_image[scale_i:, :]\n labels_ab = labels_a = labels[:-scale_i, :]\n labels_b = labels[scale_i:, :]\n elif scale_i > 0 and scale_j > 0:\n image_a = quantized_image[:-scale_i, :-scale_j]\n image_b = quantized_image[scale_i:, scale_j:]\n labels_ab = labels_a = labels[:-scale_i, :-scale_j]\n labels_b = labels[scale_i:, scale_j:]\n else:\n # scale_j should be negative\n image_a = quantized_image[:-scale_i, -scale_j:]\n image_b = quantized_image[scale_i:, :scale_j]\n labels_ab = labels_a = labels[:-scale_i, -scale_j:]\n labels_b = labels[scale_i:, :scale_j]\n equilabel = ((labels_a == labels_b) & (labels_a > 0))\n if np.any(equilabel):\n\n Q = (nlevels*nlevels*(labels_ab[equilabel]-1)+\n nlevels*image_a[equilabel]+image_b[equilabel])\n R = np.bincount(Q)\n if R.size != nobjects*nlevels*nlevels:\n S = np.zeros(nobjects*nlevels*nlevels-R.size)\n R = np.hstack((R, S))\n P = R.reshape(nobjects, nlevels, nlevels)\n pixel_count = fix(scind.sum(equilabel, labels_ab,\n np.arange(nobjects, dtype=np.int32)+1))\n pixel_count = np.tile(pixel_count[:,np.newaxis,np.newaxis],\n (1,nlevels,nlevels))\n return (P.astype(float) / pixel_count.astype(float), nlevels)\n else:\n return np.zeros((nobjects, nlevels, nlevels)), nlevels", "response": "Calculates the co - occurrence matrix for all the objects in the image."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsumming variance ( error in Haralick s original paper here.", "response": "def H7(self):\n \"Sum variance (error in Haralick's original paper here).\"\n h6 = np.tile(self.H6(), (self.rlevels2.shape[1], 1)).transpose()\n return (((self.rlevels2 + 2) - h6) ** 2 * self.p_xplusy).sum(1)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nconstructing a lookup table of the sum - of - factorial part of the radial polynomial of the zernike indexes passed in.", "response": "def construct_zernike_lookuptable(zernike_indexes):\n 
\"\"\"Return a lookup table of the sum-of-factorial part of the radial\n polynomial of the zernike indexes passed\n \n zernike_indexes - an Nx2 array of the Zernike polynomials to be\n computed.\n \"\"\"\n n_max = np.max(zernike_indexes[:,0])\n factorial = np.ones((1 + n_max,), dtype=float)\n factorial[1:] = np.cumproduct(np.arange(1, 1 + n_max, dtype=float))\n width = int(n_max//2 + 1)\n lut = np.zeros((zernike_indexes.shape[0],width), dtype=float)\n for idx, (n, m) in enumerate(zernike_indexes):\n alt = 1\n npmh = (n+m)//2\n nmmh = (n-m)//2\n for k in range(0,nmmh+1):\n lut[idx,k] = \\\n (alt * factorial[n-k] /\n (factorial[k]*factorial[npmh-k]*factorial[nmmh-k]))\n alt = -alt\n return lut"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nconstructs the zerike polynomials for all objects in an image.", "response": "def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):\n \"\"\"Return the zerike polynomials for all objects in an image\n \n x - the X distance of a point from the center of its object\n y - the Y distance of a point from the center of its object\n zernike_indexes - an Nx2 array of the Zernike polynomials to be computed.\n mask - a mask with same shape as X and Y of the points to consider\n weight - weightings of points with the same shape as X and Y (default\n weight on each point is 1).\n \n returns a height x width x N array of complex numbers which are the\n e^i portion of the sine and cosine of the Zernikes\n \"\"\"\n if x.shape != y.shape:\n raise ValueError(\"X and Y must have the same shape\")\n if mask is None:\n pass\n elif mask.shape != x.shape:\n raise ValueError(\"The mask must have the same shape as X and Y\")\n else:\n x = x[mask]\n y = y[mask]\n if weight is not None:\n weight = weight[mask]\n lut = construct_zernike_lookuptable(zernike_indexes) # precompute poly. 
coeffs.\n nzernikes = zernike_indexes.shape[0]\n # compute radii\n r_square = np.square(x) # r_square = x**2\n np.add(r_square, np.square(y), out=r_square) # r_square = x**2 + y**2\n # z = y + 1j*x\n # each Zernike polynomial is poly(r)*(r**m * np.exp(1j*m*phi)) ==\n # poly(r)*(y + 1j*x)**m\n z = np.empty(x.shape, np.complex)\n np.copyto(z.real, y)\n np.copyto(z.imag, x)\n # preallocate buffers\n s = np.empty_like(x)\n zf = np.zeros((nzernikes,) + x.shape, np.complex)\n z_pows = {}\n for idx, (n, m) in enumerate(zernike_indexes):\n s[:]=0\n if not m in z_pows:\n if m == 0:\n z_pows[m] = np.complex(1.0)\n else:\n z_pows[m] = z if m == 1 else (z ** m)\n z_pow = z_pows[m]\n # use Horner scheme\n for k in range((n-m)//2+1):\n s *= r_square\n s += lut[idx, k]\n s[r_square>1]=0\n if weight is not None:\n s *= weight.astype(s.dtype)\n if m == 0:\n np.copyto(zf[idx], s) # zf[idx] = s\n else:\n np.multiply(s, z_pow, out=zf[idx]) # zf[idx] = s*exp_term\n \n if mask is None:\n result = zf.transpose( tuple(range(1, 1+x.ndim)) + (0, ))\n else:\n result = np.zeros( mask.shape + (nzernikes,), np.complex)\n result[mask] = zf.transpose( tuple(range(1, 1 + x.ndim)) + (0, ))\n return result"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef score_zernike(zf, radii, labels, indexes=None):\n if indexes is None:\n indexes = np.arange(1,np.max(labels)+1,dtype=np.int32)\n else:\n indexes = np.array(indexes, dtype=np.int32)\n radii = np.asarray(radii, dtype=float)\n n = radii.size\n k = zf.shape[2]\n score = np.zeros((n,k))\n if n == 0:\n return score\n areas = np.square(radii)\n areas *= np.pi\n for ki in range(k):\n zfk=zf[:,:,ki]\n real_score = scipy.ndimage.sum(zfk.real,labels,indexes)\n real_score = fixup_scipy_ndimage_result(real_score)\n \n imag_score = scipy.ndimage.sum(zfk.imag,labels,indexes)\n imag_score = fixup_scipy_ndimage_result(imag_score)\n # one_score = np.sqrt(real_score**2+imag_score**2) / areas\n 
np.square(real_score, out=real_score)\n np.square(imag_score, out=imag_score)\n one_score = real_score + imag_score\n np.sqrt(one_score, out=one_score)\n one_score /= areas\n score[:,ki] = one_score\n return score", "response": "Score the output of construct_zernike_polynomials\n \n ."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning a list of all Zernike indexes up to the given limit", "response": "def get_zernike_indexes(limit=10):\n \"\"\"Return a list of all Zernike indexes up to the given limit\n \n limit - return all Zernike indexes with N less than this limit\n \n returns an array of 2-tuples. Each tuple is organized as (N,M).\n The Zernikes are stored as complex numbers with the real part\n being (N,M) and the imaginary being (N,-M)\n \"\"\"\n def zernike_indexes_iter(n_max):\n for n in range(0, n_max):\n for m in range(n%2, n+1, 2):\n yield n\n yield m\n\n z_ind = np.fromiter(zernike_indexes_iter(limit), np.intc)\n z_ind = z_ind.reshape( (len(z_ind) // 2, 2) )\n return z_ind"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef propagate(image, labels, mask, weight):\n if image.shape != labels.shape:\n raise ValueError(\"Image shape %s != label shape %s\" % (repr(image.shape), repr(labels.shape)))\n if image.shape != mask.shape:\n raise ValueError(\"Image shape %s != mask shape %s\" % (repr(image.shape), repr(mask.shape)))\n labels_out = np.zeros(labels.shape, np.int32)\n distances = -np.ones(labels.shape, np.float64)\n distances[labels > 0] = 0\n labels_and_mask = np.logical_and(labels != 0, mask)\n coords = np.argwhere(labels_and_mask)\n i1,i2 = _propagate.convert_to_ints(0.0)\n ncoords = coords.shape[0]\n pq = np.column_stack((np.ones((ncoords,), int) * i1, np.ones((ncoords,),int) * i2, labels[labels_and_mask], coords))\n _propagate.propagate(np.ascontiguousarray(image, np.float64), np.ascontiguousarray(pq,np.int32), np.ascontiguousarray(mask, 
np.int8), labels_out, distances, float(weight))\n labels_out[labels > 0] = labels[labels > 0]\n return labels_out, distances", "response": "Propagate the labels to the nearest pixels\n \n "} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef slow_reduction_transfer(ii, j, idx, count, x, u, v, c):\n '''Perform the reduction transfer step from the Jonker-Volgenant algorithm\n \n The data is input in a ragged array in terms of \"i\" structured as a\n vector of values for each i,j combination where:\n \n ii - the i to be reduced\n j - the j-index of every entry\n idx - the index of the first entry for each i\n count - the number of entries for each i\n x - the assignment of j to i\n u - the dual variable \"u\" which will be updated. It should be\n initialized to zero for the first reduction transfer.\n v - the dual variable \"v\" which will be reduced in-place\n c - the cost for each entry.\n \n The code described in the paper is:\n \n for each assigned row i do\n begin\n j1:=x[i]; u=min {c[i,j]-v[j] | j=1..n, j != j1};\n v[j1]:=v[j1]-(u-u[i]);\n u[i] = u;\n end;\n \n The authors note that reduction transfer can be applied in later stages\n of the algorithm but does not seem to provide a substantial benefit\n in speed.\n '''\n for i in ii:\n j1 = x[i]\n jj = j[idx[i]:(idx[i]+count[i])]\n uu = np.min((c[idx[i]:(idx[i]+count[i])] - v[jj])[jj != j1])\n v[j1] = v[j1] - uu + u[i]\n u[i] = uu", "response": "Perform the reduction transfer step from the Jonker - Volgenant algorithm."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef slow_augmenting_row_reduction(n, ii, jj, idx, count, x, y, u, v, c):\n '''Perform the augmenting row reduction step from the Jonker-Volgenaut algorithm\n \n n - the number of i and j in the linear assignment problem\n ii - the unassigned i\n jj - the j-index of every entry in c\n idx - the index of the first entry for each i\n count - the number 
of entries for each i\n x - the assignment of j to i\n y - the assignment of i to j\n u - the dual variable \"u\" which will be updated. It should be\n initialized to zero for the first reduction transfer.\n v - the dual variable \"v\" which will be reduced in-place\n c - the cost for each entry.\n \n returns the new unassigned i\n '''\n \n #######################################\n #\n # From Jonker:\n #\n # procedure AUGMENTING ROW REDUCTION;\n # begin\n # LIST: = {all unassigned rows};\n # for all i in LIST do\n # repeat\n # ul:=min {c[i,j]-v[j] for j=l ...n};\n # select j1 with c [i,j 1] - v[j 1] = u1;\n # u2:=min {c[i,j]-v[j] for j=l ...n,j< >jl} ;\n # select j2 with c [i,j2] - v [j2] = u2 and j2 < >j 1 ;\n # u[i]:=u2;\n # if ul 0 then x [k]:=0; x[i]:=jl; y [ j l ] : = i ; i:=k\n # until ul =u2 (* no reduction transfer *) or k=0 i~* augmentation *)\n # end\n ii = list(ii)\n k = 0\n limit = len(ii)\n free = []\n while k < limit:\n i = ii[k]\n k += 1\n j = jj[idx[i]:(idx[i] + count[i])]\n uu = c[idx[i]:(idx[i] + count[i])] - v[j]\n order = np.lexsort([uu])\n u1, u2 = uu[order[:2]]\n j1,j2 = j[order[:2]]\n i1 = y[j1]\n if u1 < u2:\n v[j1] = v[j1] - u2 + u1\n elif i1 != n:\n j1 = j2\n i1 = y[j1]\n if i1 != n:\n if u1 < u2:\n k -= 1\n ii[k] = i1\n else:\n free.append(i1)\n x[i] = j1\n y[j1] = i\n return np.array(free,np.uint32)", "response": "This function performs the augmenting row reduction step from the Jonker - Volgenaut algorithm."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\naugmenting the unassigned i and j by a given number of variables.", "response": "def slow_augment(n, ii, jj, idx, count, x, y, u, v, c):\n '''Perform the augmentation step to assign unassigned i and j\n \n n - the # of i and j, also the marker of unassigned x and y\n ii - the unassigned i\n jj - the ragged arrays of j for each i\n idx - the index of the first j for each i\n count - the number of j for each i\n x - the assignments of j for each i\n y - the 
assignments of i for each j\n u,v - the dual variables\n c - the costs\n '''\n \n ##################################################\n #\n # Augment procedure: from the Jonker paper.\n #\n # Note:\n # cred [i,j] = c [i,j] - u [i] - v[j]\n #\n # procedure AUGMENT;\n # begin\n # for all unassigned i* do\n # begin\n # for j:= 1 ... n do \n # begin d[j] := c[i*,j] - v[j] ; pred[j] := i* end;\n # READY: = { ) ; SCAN: = { } ; TODO: = { 1 ... n} ;\n # repeat\n # if SCAN = { } then\n # begin\n # u = min {d[j] for j in TODO} ; \n # SCAN: = {j | d[j] = u} ;\n # TODO: = TODO - SCAN;\n # for j in SCAN do if y[j]==0 then go to augment\n # end;\n # select any j* in SCAN;\n # i := y[j*]; SCAN := SCAN - {j*} ; READY: = READY + {j*} ;\n # for all j in TODO do if u + cred[i,j] < d[j] then\n # begin\n # d[j] := u + cred[i,j]; pred[j] := i;\n # if d[j] = u then\n # if y[j] is unassigned then go to augment else\n # begin SCAN: = SCAN + {j} ; TODO: = TODO - {j} end\n # end\n # until false; (* repeat always ends with go to augment *)\n #augment:\n # (* price updating *)\n # for k in READY do v[k]: = v[k] + d[k] - u;\n # (* augmentation *)\n # repeat\n # i: = pred[j]; y[ j ] := i ; k:=j; j:=x[i]; x[i]:= k\n # until i = i*\n # end\n #end\n inf = np.sum(c) + 1\n d = np.zeros(n)\n cc = np.zeros((n,n))\n cc[:,:] = inf\n for i in range(n):\n cc[i,jj[idx[i]:(idx[i]+count[i])]] = c[idx[i]:(idx[i]+count[i])]\n c = cc\n for i in ii:\n print(\"Processing i=%d\" % i)\n j = jj[idx[i]:(idx[i] + count[i])]\n d = c[i,:] - v\n pred = np.ones(n, int) * i\n on_deck = []\n ready = []\n scan = []\n to_do = list(range(n))\n try:\n while True:\n print(\"Evaluating i=%d, n_scan = %d\" % (i, len(scan)))\n if len(scan) == 0:\n ready += on_deck\n on_deck = []\n umin = np.min([d[jjj] for jjj in to_do])\n print(\"umin = %f\" % umin)\n scan = [jjj for jjj in to_do if d[jjj] == umin]\n to_do = [jjj for jjj in to_do if d[jjj] != umin]\n for j1 in scan:\n if y[j1] == n:\n raise StopIteration()\n j1 = scan[0]\n iii = 
y[j1]\n print(\"Consider replacing i=%d, j=%d\" % (iii, j1))\n scan = scan[1:]\n on_deck += [j1]\n u1 = c[iii, j1] - v[j1] - umin\n for j1 in list(to_do):\n h = c[iii, j1] - v[j1] - u1\n print(\"Consider j=%d as replacement, c[%d,%d]=%f,v[%d]=%f,h=%f, d[j]= %f\" % (j1,iii,j1,c[iii,j1],j1,v[j1],h,d[j1]))\n if h < d[j1]:\n print(\"Add to chain\")\n pred[j1] = iii\n if h == umin:\n if y[j1] == n:\n raise StopIteration()\n print(\"Add to scan\")\n scan += [j1]\n to_do.remove(j1)\n d[j1] = h\n\n except StopIteration:\n # Augment\n print(\"Augmenting %d\" % j1)\n for k in ready:\n temp = v[k]\n v[k] = v[k] + d[k] - umin\n print(\"v[%d] %f -> %f\" % (k, temp, v[k]))\n while True:\n iii = pred[j1]\n print(\"y[%d] %d -> %d\" % (j1, y[j1], iii))\n y[j1] = iii\n j1, x[iii] = x[iii], j1\n if iii == i:\n break\n #\n # Re-establish slackness since we didn't pay attention to u\n #\n for i in range(n):\n j = x[i]\n u[i] = c[i,j] - v[j]"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef estimate_parameters_rippe(\n self, max_dist_kb, size_bin_kb, display_graph\n ):\n \"\"\"\n estimation by least square optimization of Rippe parameters on the\n experimental data\n :param max_dist_kb:\n :param size_bin_kb:\n \"\"\"\n logger.info(\"estimation of the parameters of the model\")\n self.bins = np.arange(\n size_bin_kb, max_dist_kb + size_bin_kb, size_bin_kb\n )\n\n self.mean_contacts = np.zeros_like(self.bins, dtype=np.float32)\n self.dict_collect = dict()\n self.gpu_vect_frags.copy_from_gpu()\n epsi = self.mean_value_trans\n for k in self.bins:\n self.dict_collect[k] = []\n for i in range(0, self.n_frags // 10):\n # print \"frag i = \", i\n start = self.sparse_matrix.indptr[i]\n end = self.sparse_matrix.indptr[i + 1]\n id_j = self.sparse_matrix.indices[start:end]\n data = self.sparse_matrix.data[start:end]\n\n info_i = self.np_sub_frags_2_frags[i]\n init_id_fi = int(info_i[0])\n # pos_i = self.S_o_A_frags[\"pos\"][init_id_fi]\n 
id_c_i = self.S_o_A_frags[\"id_c\"][init_id_fi]\n s_i = (\n self.S_o_A_frags[\"start_bp\"][init_id_fi] / 1000.0\n + self.np_sub_frags_2_frags[i][1]\n )\n len_kb_c_i = self.S_o_A_frags[\"l_cont_bp\"][init_id_fi] / 1000\n # local_bins = np.arange(size_bin_kb, min(len_kb_c_i, max_dist_kb)\n # + size_bin_kb, size_bin_kb) local_storage =\n # np.zeros_like(local_bins, dtype=np.int32)\n #\n # for fj, dj in zip(id_j, data):\n # info_j = self.np_sub_frags_2_frags[fj]\n # init_id_fj = info_j[0]\n # id_c_j = self.S_o_A_frags['id_c'][init_id_fj]\n # if id_c_i == id_c_j:\n # pos_j = self.S_o_A_frags['pos'][init_id_fj]\n # s_j = self.S_o_A_frags['start_bp'][init_id_fj]/1000.0 +\n # self.np_sub_frags_2_frags[fj][1]\n # d = np.abs(s_i - s_j)\n # if d < max_dist_kb:\n # id_bin = d / size_bin_kb\n # local_storage[id_bin] += dj\n # # self.dict_collect[self.bins[id_bin]].append(dj)\n # # we have to add also the zeros\n # for bin, val in zip(local_bins, local_storage):\n # # print \"bin = \", bin\n # self.dict_collect[bin].append(val)\n\n if size_bin_kb < len_kb_c_i:\n # local_bins = np.arange(size_bin_kb, min(len_kb_c_i,\n # max_dist_kb) + size_bin_kb, size_bin_kb)\n local_bins = np.arange(\n size_bin_kb, max_dist_kb + size_bin_kb, size_bin_kb\n )\n local_storage = np.zeros_like(local_bins, dtype=np.int32)\n\n for fj, dj in zip(id_j, data):\n info_j = self.np_sub_frags_2_frags[fj]\n init_id_fj = int(info_j[0])\n id_c_j = self.S_o_A_frags[\"id_c\"][init_id_fj]\n if id_c_i == id_c_j:\n # pos_j = self.S_o_A_frags[\"pos\"][init_id_fj]\n s_j = (\n self.S_o_A_frags[\"start_bp\"][init_id_fj] / 1000.0\n + self.np_sub_frags_2_frags[fj][1]\n )\n d = np.abs(s_i - s_j)\n if d < max_dist_kb:\n id_bin = int(d / size_bin_kb)\n local_storage[id_bin] += dj\n # self.dict_collect[self.bins[id_bin]].append(dj)\n # we have to add also the zeros\n for bin, val in zip(local_bins, local_storage):\n # print \"bin = \", bin\n self.dict_collect[bin].append(val)\n\n for id_bin in range(0, len(self.bins)):\n k = 
self.bins[id_bin]\n self.mean_contacts[id_bin] = np.mean(self.dict_collect[k])\n\n for id_bin in range(0, len(self.bins)):\n k = self.bins[id_bin]\n tmp = np.mean(self.dict_collect[k])\n if np.isnan(tmp) or tmp == 0:\n # if np.isnan(tmp):\n # if np.isnan(tmp):\n # print \"removing nan\"\n self.mean_contacts[id_bin] = np.nan\n else:\n self.mean_contacts[id_bin] = tmp + epsi\n\n self.mean_contacts_upd = []\n self.bins_upd = []\n\n for count, ele in zip(self.mean_contacts, self.bins):\n if not np.isnan(count):\n self.bins_upd.append(ele)\n self.mean_contacts_upd.append(count)\n\n self.bins_upd = np.array(self.bins_upd)\n # self.mean_contacts_upd.sort()\n # self.mean_contacts_upd.reverse()\n self.mean_contacts_upd = np.array(self.mean_contacts_upd)\n\n # self.mean_contacts_upd =\n # ndi.filters.gaussian_filter1d(self.mean_contacts_upd,\n # sigma=len(self.bins_upd) / 5.)\n\n # print \"size mean contacts vector = \", self.mean_contacts_upd.shape\n # print \"mean contacts vector = \", self.mean_contacts_upd\n\n p, self.y_estim = opti.estimate_param_rippe(\n self.mean_contacts_upd, self.bins_upd\n )\n ##########################################\n logger.info(\"p from estimate parameters = {}\".format(p))\n # p = list(p[0])\n # p[3] = 2\n # p = tuple(p)\n ##########################################\n fit_param = p\n logger.info(\"mean value trans = {}\".format(self.mean_value_trans))\n ##########################################\n logger.info(\"BEWARE!!! 
: I will lower mean value trans !!!\")\n self.mean_value_trans = self.mean_value_trans / 10.0\n ##########################################\n\n estim_max_dist = opti.estimate_max_dist_intra(\n fit_param, self.mean_value_trans\n )\n logger.info(\"estimate max dist cis trans = {}\".format(estim_max_dist))\n self.param_simu = self.setup_rippe_parameters(\n fit_param, estim_max_dist\n )\n self.param_simu_test = self.param_simu\n self.gpu_param_simu = cuda.mem_alloc(self.param_simu.nbytes)\n self.gpu_param_simu_test = cuda.mem_alloc(self.param_simu.nbytes)\n\n cuda.memcpy_htod(self.gpu_param_simu, self.param_simu)\n cuda.memcpy_htod(self.gpu_param_simu_test, self.param_simu_test)\n display_graph = False\n if display_graph:\n plt.figure()\n plt.loglog(self.bins_upd, self.mean_contacts_upd, \"-*b\")\n plt.loglog(self.bins_upd, self.y_estim, \"-*r\")\n plt.xlabel(\"genomic distance (kb)\")\n plt.ylabel(\"frequency of contact\")\n plt.title(\n r\"$\\mathrm{Frequency\\ of\\ contact\\ versus\\ genomic\\ distance\"\n \" \\(data\\ tricho test):}\\ slope=%.3f,\\ max\\ cis\\ distance(kb)\"\n \"=%.3f\\ d=%.3f\\ scale\\ factor=%.3f\\ $\"\n % (\n self.param_simu[\"slope\"],\n estim_max_dist,\n self.param_simu[\"d\"],\n self.param_simu[\"fact\"],\n )\n )\n plt.legend([\"obs\", \"fit\"])\n plt.savefig()\n plt.close()\n\n self.eval_likelihood_init()", "response": "estimate the parameters of the Rippe model"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nestimate parameters of the Rippe model", "response": "def estimate_parameters(self, max_dist_kb, size_bin_kb, display_graph):\n \"\"\"\n estimation by least square optimization of Rippe parameters on the\n experimental data\n :param max_dist_kb:\n :param size_bin_kb:\n \"\"\"\n logger.info(\"estimation of the parameters of the model\")\n self.bins = np.arange(\n size_bin_kb, max_dist_kb + size_bin_kb, size_bin_kb\n )\n\n self.mean_contacts = np.zeros_like(self.bins, dtype=np.float32)\n 
self.dict_collect = dict()\n self.gpu_vect_frags.copy_from_gpu()\n epsi = self.mean_value_trans\n for k in self.bins:\n self.dict_collect[k] = []\n for i in range(0, 2000):\n # print \"frag i = \", i\n start = self.sparse_matrix.indptr[i]\n end = self.sparse_matrix.indptr[i + 1]\n id_j = self.sparse_matrix.indices[start:end]\n data = self.sparse_matrix.data[start:end]\n\n info_i = self.np_sub_frags_2_frags[i]\n init_id_fi = info_i[0]\n # pos_i = self.S_o_A_frags[\"pos\"][init_id_fi]\n id_c_i = self.S_o_A_frags[\"id_c\"][init_id_fi]\n s_i = (\n self.S_o_A_frags[\"start_bp\"][init_id_fi] / 1000.0\n + self.np_sub_frags_2_frags[i][1]\n )\n len_kb_c_i = self.S_o_A_frags[\"l_cont_bp\"][init_id_fi] / 1000\n local_bins = np.arange(\n size_bin_kb,\n min(len_kb_c_i, max_dist_kb) + size_bin_kb,\n size_bin_kb,\n )\n local_storage = np.zeros_like(local_bins, dtype=np.int32)\n\n for fj, dj in zip(id_j, data):\n info_j = self.np_sub_frags_2_frags[fj]\n init_id_fj = info_j[0]\n id_c_j = self.S_o_A_frags[\"id_c\"][init_id_fj]\n if id_c_i == id_c_j:\n # pos_j = self.S_o_A_frags[\"pos\"][init_id_fj]\n s_j = (\n self.S_o_A_frags[\"start_bp\"][init_id_fj] / 1000.0\n + self.np_sub_frags_2_frags[fj][1]\n )\n d = np.abs(s_i - s_j)\n if d < max_dist_kb:\n id_bin = d / size_bin_kb\n local_storage[id_bin] += dj\n # self.dict_collect[self.bins[id_bin]].append(dj)\n # we have to add also the zeros\n for my_bin, val in zip(local_bins, local_storage):\n # print \"bin = \", bin\n self.dict_collect[my_bin].append(val)\n\n for id_bin in range(0, len(self.bins)):\n k = self.bins[id_bin]\n self.mean_contacts[id_bin] = np.mean(self.dict_collect[k])\n\n for id_bin in range(0, len(self.bins)):\n k = self.bins[id_bin]\n tmp = np.mean(self.dict_collect[k])\n if np.isnan(tmp) or tmp == 0:\n # if np.isnan(tmp):\n # if np.isnan(tmp):\n # print \"removing nan\"\n self.mean_contacts[id_bin] = np.nan\n else:\n self.mean_contacts[id_bin] = tmp + epsi\n\n self.mean_contacts_upd = []\n self.bins_upd = []\n\n for 
count, ele in zip(self.mean_contacts, self.bins):\n if not np.isnan(count):\n self.bins_upd.append(ele)\n self.mean_contacts_upd.append(count)\n\n self.bins_upd = np.array(self.bins_upd)\n self.mean_contacts_upd = np.array(self.mean_contacts_upd)\n\n # self.mean_contacts_upd =\n # ndi.filters.gaussian_filter1d(self.mean_contacts_upd,\n # sigma=len(self.bins_upd) / 5.)\n\n p, self.y_estim = nuis.estimate_param_hic(\n self.mean_contacts_upd, self.bins_upd\n )\n ##########################################\n fit_param = p.x\n ##########################################\n logger.info(\"mean value trans = {}\".format(self.mean_value_trans))\n ##########################################\n estim_max_dist = nuis.estimate_max_dist_intra(\n fit_param, self.mean_value_trans\n )\n logger.info(\"max distance cis/trans = {}\".format(estim_max_dist))\n ##########################################\n self.param_simu = self.setup_model_parameters(\n fit_param, estim_max_dist\n )\n self.gpu_param_simu = cuda.mem_alloc(self.param_simu.nbytes)\n self.gpu_param_simu_test = cuda.mem_alloc(self.param_simu.nbytes)\n\n cuda.memcpy_htod(self.gpu_param_simu, self.param_simu)\n\n if display_graph:\n plt.loglog(self.bins_upd, self.mean_contacts_upd, \"-*b\")\n plt.loglog(self.bins_upd, self.y_estim, \"-*r\")\n plt.xlabel(\"genomic distance (kb)\")\n plt.ylabel(\"frequency of contact\")\n plt.legend([\"obs\", \"fit\"])\n plt.show()"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef collapse_degenerate_markers(linkage_records):\n\n \"\"\"Group all markers with no genetic distance as distinct features\n to generate a BED file with.\n\n Simple example with sixteen degenerate markers:\n\n >>> marker_features = [\n ... ['36915_sctg_207_31842', 1, 0, 207, 31842],\n ... ['36941_sctg_207_61615', 1, 0, 207, 61615],\n ... ['36956_sctg_207_77757', 1, 0, 207, 77757],\n ... ['36957_sctg_207_78332', 1, 0, 207, 78332],\n ... 
['36972_sctg_207_94039', 1, 0, 207, 94039],\n ... ['36788_sctg_207_116303', 1, 0.652, 207, 116303],\n ... ['36812_sctg_207_158925', 1, 1.25, 207, 158925],\n ... ['36819_sctg_207_165424', 1, 1.25, 207, 165424],\n ... ['36828_sctg_207_190813', 1, 1.25, 207, 190813],\n ... ['36830_sctg_207_191645', 1, 1.25, 207, 191645],\n ... ['36834_sctg_207_195961', 1, 1.25, 207, 195961],\n ... ['36855_sctg_207_233632', 1, 1.25, 207, 233632],\n ... ['36881_sctg_207_258658', 1, 1.25, 207, 258658],\n ... ['82072_sctg_486_41893', 1, 3.756, 486, 41893],\n ... ['85634_sctg_516_36614', 1,\t3.756, 516, 36614],\n ... ['85638_sctg_516_50582', 1, 3.756, 516, 50582]]\n\n >>> len(marker_features)\n 16\n\n >>> collapsed_features = collapse_degenerate_markers(marker_features)\n >>> len(collapsed_features)\n 5\n\n The degenerate features (identical linkage group, genetic distance and\n original scaffold) are collapsed into a region:\n\n >>> collapsed_features[0]\n [1, 31842, 94039, 207]\n\n The format is [linkage group, start, end, original scaffold].\n\n If a singleton (non-degenerate) feature is found, the region is simply\n a single point in the genome:\n\n >>> collapsed_features[1]\n [1, 116303, 116303, 207]\n\n so 'start' and 'end' are identical.\n\n Two markers are not considered degenerate if they belong to different\n original scaffolds, even if they are in terms of genetic linkage:\n\n >>> collapsed_features[2]\n [1, 158925, 258658, 207]\n >>> collapsed_features[3:]\n [[1, 41893, 41893, 486], [1, 36614, 50582, 516]]\n \"\"\"\n\n def degeneracy(linkage_record):\n linkage_group, genetic_distance, scaffold = (\n linkage_record[1],\n linkage_record[2],\n linkage_record[3],\n )\n return (linkage_group, genetic_distance, scaffold)\n\n degenerate_records = []\n for _, degenerate_group in itertools.groupby(\n linkage_records, key=degeneracy\n ):\n group_list = list(degenerate_group)\n start_record, end_record = group_list[0], group_list[-1]\n\n assert (start_record[1], start_record[2], 
start_record[3]) == (\n end_record[1],\n end_record[2],\n end_record[3],\n )\n\n start_position = start_record[-1]\n end_position = end_record[-1]\n\n scaffold = start_record[3]\n linkage_group = start_record[1]\n record = [linkage_group, start_position, end_position, scaffold]\n\n degenerate_records.append(record)\n\n return degenerate_records", "response": "Group all markers with no genetic distance as distinct features\n to generate a BED file with."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nconverting degenerate linkage records into ordered info_frags - like records for comparison purposes.", "response": "def linkage_group_ordering(linkage_records):\n \"\"\"Convert degenerate linkage records into ordered info_frags-like records\n for comparison purposes.\n\n Simple example:\n\n >>> linkage_records = [\n ... ['linkage_group_1', 31842, 94039, 'sctg_207'],\n ... ['linkage_group_1', 95303, 95303, 'sctg_207'],\n ... ['linkage_group_2', 15892, 25865, 'sctg_308'],\n ... ['linkage_group_2', 41893, 41893, 'sctg_486'],\n ... ['linkage_group_3', 36614, 50582, 'sctg_516'],\n ... 
]\n >>> ordering = linkage_group_ordering(linkage_records)\n\n Each key of the record is a newly-formed 'scaffold' (linkage group):\n\n >>> sorted(ordering.keys())\n ['linkage_group_1', 'linkage_group_2', 'linkage_group_3']\n\n Records are in the form [init_contig, frag_id, start, end, orientation].\n Since fragment ids are meaningless in non-HiC contexts a negative\n identifier is set so it is understood that region was added due to\n linkage data (-1 is for recovering data after first-pass polishing and -2\n is for sequence insertions after long read based polishing).\n\n >>> ordering['linkage_group_1']\n [['sctg_207', -3, 31842, 94039, 1], ['sctg_207', -3, 95303, 95303, 1]]\n >>> ordering['linkage_group_2']\n [['sctg_308', -3, 15892, 25865, 1], ['sctg_486', -3, 41893, 41893, 1]]\n\n Orientations are always set to 1 by default.\n\n >>> ordering['linkage_group_3']\n [['sctg_516', -3, 36614, 50582, 1]]\n\n\n \"\"\"\n\n new_records = dict()\n\n for lg_name, linkage_group in itertools.groupby(\n linkage_records, operator.itemgetter(0)\n ):\n new_records[lg_name] = []\n for record in linkage_group:\n init_contig = record[-1]\n start = record[1]\n end = record[2]\n\n new_record = [init_contig, -3, start, end, 1]\n\n new_records[lg_name].append(new_record)\n\n return new_records"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef compare_orderings(info_frags_records, linkage_orderings):\n\n \"\"\"Given linkage groups and info_frags records, link pseudo-chromosomes to\n scaffolds based on the initial contig composition of each group. Because\n info_frags records are usually richer and may contain contigs not found\n in linkage groups, those extra sequences are discarded.\n\n Example with two linkage groups and two chromosomes:\n\n >>> linkage_orderings = {\n ... 'linkage_group_1': [\n ... ['sctg_516', -3, 36614, 50582, 1],\n ... ['sctg_486', -3, 41893, 41893, 1],\n ... ['sctg_486', -3, 50054, 62841, 1],\n ... 
['sctg_207', -3, 31842, 94039, 1],\n ... ['sctg_558', -3, 51212, 54212, 1],\n ... ],\n ... 'linkage_group_2': [\n ... ['sctg_308', -3, 15892, 25865, 1],\n ... ['sctg_842', -3, 0, 8974, 1],\n ... ['sctg_994', -3, 0, 81213, 1],\n ... ],\n ... }\n >>> info_frags = {\n ... 'scaffold_A': [\n ... ['sctg_308', 996, 15892, 25865, 1],\n ... ['sctg_778', 1210, 45040, 78112, -1],\n ... ['sctg_842', 124, 0, 8974, 1],\n ... ],\n ... 'scaffold_B': [\n ... ['sctg_516', 5, 0, 38000, 1],\n ... ['sctg_486', 47, 42050, 49000, 1],\n ... ['sctg_1755', 878, 95001, 10844, -1],\n ... ['sctg_842', 126, 19000, 26084, 1],\n ... ['sctg_207', 705, 45500, 87056, 1],\n ... ],\n ... 'scaffold_C': [\n ... ['sctg_558', 745, 50045, 67851, 1],\n ... ['sctg_994', 12, 74201, 86010, -1],\n ... ],\n ... }\n >>> matching_pairs = compare_orderings(info_frags, linkage_orderings)\n >>> matching_pairs['scaffold_B']\n (3, 'linkage_group_1', {'sctg_558': 'sctg_207'})\n >>> matching_pairs['scaffold_A']\n (2, 'linkage_group_2', {'sctg_994': 'sctg_842'})\n\n \"\"\"\n\n scaffolds = info_frags_records.keys()\n linkage_groups = linkage_orderings.keys()\n\n best_matching_table = dict()\n\n for scaffold, linkage_group in itertools.product(\n scaffolds, linkage_groups\n ):\n lg_ordering = [\n init_contig\n for init_contig, _ in itertools.groupby(\n linkage_orderings[linkage_group], operator.itemgetter(0)\n )\n ]\n scaffold_ordering = [\n init_contig\n for init_contig, bin_group in itertools.groupby(\n info_frags_records[scaffold], operator.itemgetter(0)\n )\n if init_contig in lg_ordering\n ]\n\n overlap = set(lg_ordering).intersection(set(scaffold_ordering))\n missing_locations = dict()\n for missing_block in sorted(set(lg_ordering) - set(overlap)):\n for i, init_contig in enumerate(lg_ordering):\n if init_contig == missing_block:\n try:\n block_before = lg_ordering[i - 1]\n except IndexError:\n block_before = \"beginning\"\n missing_locations[missing_block] = block_before\n\n try:\n if len(overlap) > 
best_matching_table[scaffold][0]:\n best_matching_table[scaffold] = (\n len(overlap),\n linkage_group,\n missing_locations,\n )\n\n except KeyError:\n best_matching_table[scaffold] = (\n len(overlap),\n linkage_group,\n missing_locations,\n )\n\n return best_matching_table", "response": "Compare the orderings of info_frags records with the linkage_group_1 and linkage_group_2."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_missing_blocks(info_frags_records, matching_pairs, linkage_orderings):\n\n import logging\n import sys\n\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n touched_lgs = set()\n\n def record_length(key_record_tuple):\n return len(key_record_tuple[1])\n\n new_scaffolds = copy.deepcopy(info_frags_records)\n\n for scaffold_name in collections.OrderedDict(\n sorted(new_scaffolds.items(), key=record_length, reverse=True)\n ):\n scaffold = new_scaffolds[scaffold_name]\n new_scaffold = []\n corresponding_lg = matching_pairs[scaffold_name][1]\n if corresponding_lg in touched_lgs:\n continue\n else:\n touched_lgs.add(corresponding_lg)\n scaffold_block_names = {my_bin[0] for my_bin in scaffold}\n lg_block_names = [\n my_block[0] for my_block in linkage_orderings[corresponding_lg]\n ]\n\n touched_bins = set()\n\n for lg_block_name in lg_block_names:\n if lg_block_name in scaffold_block_names:\n for my_bin in scaffold:\n if tuple(my_bin) in new_scaffold:\n continue\n elif (\n my_bin[0] == lg_block_name\n or my_bin[0] not in lg_block_names\n ):\n new_scaffold.append(tuple(my_bin))\n touched_bins.add(tuple(my_bin))\n else:\n break\n for other_name, other_scaffold in new_scaffolds.items():\n if other_name == scaffold_name:\n continue\n i = 0\n for my_bin in other_scaffold:\n if tuple(my_bin) in new_scaffold:\n i += 1\n continue\n elif my_bin[0] == lg_block_name:\n moved_bin = tuple(other_scaffold.pop(i))\n 
new_scaffold.append(tuple(moved_bin))\n touched_bins.add(tuple(moved_bin))\n i -= 1\n i += 1\n\n for remaining_bin in scaffold:\n if tuple(remaining_bin) not in touched_bins:\n new_scaffold.append(tuple(remaining_bin))\n touched_bins.add(tuple(remaining_bin))\n\n if len(new_scaffold) > 0:\n new_scaffolds[scaffold_name] = new_scaffold\n\n return new_scaffolds", "response": "Get the missing blocks in a scaffold based on the genetic map order."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef parse_info_frags(info_frags):\n\n new_scaffolds = {}\n with open(info_frags, \"r\") as info_frags_handle:\n current_new_contig = None\n for line in info_frags_handle:\n if line.startswith(\">\"):\n current_new_contig = str(line[1:-1])\n new_scaffolds[current_new_contig] = []\n elif line.startswith(\"init_contig\"):\n pass\n else:\n (init_contig, id_frag, orientation, pos_start, pos_end) = str(\n line[:-1]\n ).split(\"\\t\")\n start = int(pos_start)\n end = int(pos_end)\n ori = int(orientation)\n fragid = int(id_frag)\n assert start < end\n assert ori in {-1, 1}\n new_scaffolds[current_new_contig].append(\n [init_contig, fragid, start, end, ori]\n )\n\n return new_scaffolds", "response": "Import an info_frags. 
txt file and return a dictionary where each key is a newly formed scaffold and each value is a list of bins and their their\n origin on the initial scaffolding."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef parse_bed(bed_file):\n\n new_scaffolds = {}\n with open(bed_file) as bed_handle:\n for line in bed_handle:\n chrom, start, end, query, qual, strand = line.split()[:7]\n if strand == \"+\":\n ori = 1\n elif strand == \"-\":\n ori = -1\n else:\n raise ValueError(\n \"Error when parsing strand \"\n \"orientation: {}\".format(strand)\n )\n\n if int(qual) > 0:\n bed_bin = [query, -2, int(start), int(end), ori]\n try:\n new_scaffolds[chrom].append(bed_bin)\n except KeyError:\n new_scaffolds[chrom] = [bed_bin]\n\n return new_scaffolds", "response": "Parse a BED file and return a scaffold dictionary"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncorrects the scaffolds for the given scaffolds.", "response": "def correct_scaffolds(scaffolds, corrector):\n \"\"\"Unfinished\n \"\"\"\n\n new_scaffolds = {}\n\n def are_overlapping(bin1, bin2):\n \"\"\"Check for overlapping regions between two regions - necessary\n requirement before potentially merging\n \"\"\"\n\n if bin2 is None:\n return False\n\n init1, _, start1, end1, _ = bin1\n init2, _, start2, end2, _ = bin2\n\n if init1 != init2:\n return False\n else:\n return (start2 <= start1 <= end2) or (start1 <= start2 <= end1)\n\n def merge_bins(bin1, bin2, ignore_ori=True):\n \"\"\"Painstakingly check for every edge case in order to properly merge\n two overlapping bins.\n \"\"\"\n init1, _, start1, end1, ori1 = bin1\n init2, _, start2, end2, ori2 = bin2\n\n assert init1 == init2\n start = min(start1, start2)\n end = max(end1, end2)\n if ignore_ori:\n ori = ori1\n else:\n len1 = end1 - start1\n len2 = end2 - start2\n if len2 > len1:\n ori = ori2\n else:\n ori = ori1\n\n new_bin = [init1, -3, start, end, ori]\n return 
new_bin\n\n corrector_bins = copy.deepcopy(corrector)\n\n for name, scaffold in scaffolds.items():\n new_scaffold = []\n\n for _, blocks in itertools.groupby(scaffold, operator.itemgetter(0)):\n merged_bin = None\n while \"Reading blocks\":\n try:\n my_bin = next(blocks)\n if are_overlapping(my_bin, merged_bin):\n merged_bin = merge_bins(my_bin, merged_bin)\n continue\n else:\n if merged_bin is not None:\n new_scaffold.append(merged_bin)\n merged_bin = my_bin\n i = 0\n for correct_bin in corrector_bins:\n if are_overlapping(my_bin, correct_bin):\n merged_bin = merge_bins(my_bin, correct_bin)\n corrector_bins.pop(i)\n i -= 1\n i += 1\n except StopIteration:\n if merged_bin is not None:\n new_scaffold.append(merged_bin)\n break\n\n new_scaffolds[name] = new_scaffold\n\n return new_scaffolds"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef format_info_frags(info_frags):\n if isinstance(info_frags, dict):\n return info_frags\n else:\n try:\n scaffolds = parse_info_frags(info_frags)\n return scaffolds\n except OSError:\n print(\"Error when opening info_frags.txt\")\n raise", "response": "A function to seamlessly run on either scaffolds or info_frags. txt files without having to check the input first."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef plot_info_frags(scaffolds):\n\n \"\"\"A crude way to visualize new scaffolds according to their origin on the\n initial scaffolding. Each scaffold spawns a new plot. 
Orientations are\n represented by different colors.\n \"\"\"\n\n scaffolds = format_info_frags(scaffolds)\n for name, scaffold in scaffolds.items():\n plt.figure()\n xs = range(len(scaffold))\n color = []\n names = {}\n ys = []\n for my_bin in scaffold:\n current_color = \"r\" if my_bin[4] > 0 else \"g\"\n color += [current_color]\n name = my_bin[0]\n if name in names:\n ys.append(names[name])\n else:\n names[name] = len(names)\n ys.append(names[name])\n plt.scatter(xs, ys, c=color)\n plt.show()", "response": "A crude way to visualize new scaffolds according to their origin on the\n initial scaffolding. Each scaffold spawns a new plot."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef remove_spurious_insertions(scaffolds):\n\n \"\"\"Remove all bins whose left and right neighbors belong to the same,\n different scaffold.\n\n Example with three such insertions in two different scaffolds:\n\n >>> scaffolds = {\n ... \"scaffold1\": [\n ... [\"contig1\", 0, 0, 100, 1],\n ... [\"contig1\", 1, 100, 200, 1],\n ... [\"contig23\", 53, 1845, 2058, -1], # <-- insertion\n ... [\"contig1\", 4, 254, 408, 1],\n ... [\"contig1\", 7, 805, 1253, 1],\n ... [\"contig5\", 23, 1500, 1605, -1],\n ... [\"contig65\", 405, 32145, 45548, -1], # <-- insertion\n ... [\"contig5\", 22, 1385, 1499, -1],\n ... ],\n ... \"scaffold2\": [\n ... [\"contig8\", 0, 0, 250, 1],\n ... [\"contig17\", 2454, 8754, -1], # <-- insertion\n ... [\"contig8\", 2, 320, 480, 1],\n ... ],\n ... }\n\n >>> new_scaffolds = remove_spurious_insertions(scaffolds)\n >>> for my_bin in new_scaffolds['scaffold1']:\n ... print(my_bin)\n ...\n ['contig1', 0, 0, 100, 1]\n ['contig1', 1, 100, 200, 1]\n ['contig1', 4, 254, 408, 1]\n ['contig1', 7, 805, 1253, 1]\n ['contig5', 23, 1500, 1605, -1]\n ['contig5', 22, 1385, 1499, -1]\n\n >>> for my_bin in new_scaffolds['scaffold2']:\n ... 
print(my_bin)\n ...\n ['contig8', 0, 0, 250, 1]\n ['contig8', 2, 320, 480, 1]\n\n\n \"\"\"\n\n scaffolds = format_info_frags(scaffolds)\n new_scaffolds = {}\n for name, scaffold in scaffolds.items():\n new_scaffold = []\n if len(scaffold) > 2:\n for i in range(len(scaffold)):\n # First take care of edge cases: *-- or --*\n if i == 0:\n if not (\n scaffold[i][0] != scaffold[i + 1][0]\n and scaffold[i + 1][0] == scaffold[i + 2][0]\n ):\n new_scaffold.append(scaffold[i])\n elif i == len(scaffold) - 1:\n if not (\n scaffold[i][0] != scaffold[i - 1][0]\n and scaffold[i - 1][0] == scaffold[i - 2][0]\n ):\n new_scaffold.append(scaffold[i])\n # Otherwise, looking for -*-\n else:\n if not (\n scaffold[i - 1][0] == scaffold[i + 1][0]\n and scaffold[i - 1][0] != scaffold[i][0]\n ):\n new_scaffold.append(scaffold[i])\n else:\n # Can't remove insertions if 2 bins or less\n new_scaffold = copy.deepcopy(scaffold)\n\n new_scaffolds[name] = new_scaffold\n\n return new_scaffolds", "response": "Remove all spurious insertions in scaffolds."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef correct_spurious_inversions(scaffolds, criterion=\"colinear\"):\n\n scaffolds = format_info_frags(scaffolds)\n new_scaffolds = {}\n\n def is_cis(bin1, bin2):\n return bin1[0] == bin2[0]\n\n def is_contiguous(bin1, bin2):\n return is_cis(bin1, bin2) and bin1[3] == bin2[2]\n\n def is_colinear(bin1, bin2):\n return is_cis(bin1, bin2) and bin1[3] <= bin2[2]\n\n condition_callables = {\n \"cis\": is_cis,\n \"colinear\": is_colinear,\n \"contiguous\": is_contiguous,\n }\n block_test = condition_callables.get(criterion, \"colinear\")\n\n for name, scaffold in scaffolds.items():\n new_scaffold = []\n\n block_cumulative_ori = 0\n\n if len(scaffold) > 2:\n current_bin = scaffold[0]\n block_buffer = []\n for my_bin in scaffold:\n\n if not block_buffer:\n new_bin = copy.deepcopy(my_bin)\n block_buffer.append(new_bin)\n block_cumulative_ori = 
my_bin[-1]\n continue\n\n elif not block_test(current_bin, my_bin):\n for my_buf_bin in block_buffer:\n new_bin = copy.deepcopy(my_buf_bin)\n if block_cumulative_ori >= 0:\n new_bin[-1] = 1\n else:\n new_bin[-1] = -1\n new_scaffold.append(new_bin)\n block_cumulative_ori = my_bin[-1]\n current_bin = copy.deepcopy(my_bin)\n block_buffer = copy.deepcopy([my_bin])\n\n else:\n block_cumulative_ori += my_bin[-1]\n new_bin = copy.deepcopy(my_bin)\n block_buffer.append(new_bin)\n current_bin = my_bin\n\n for my_bin in block_buffer:\n new_bin = copy.deepcopy(my_bin)\n if block_cumulative_ori >= 0:\n new_bin[-1] = 1\n else:\n new_bin[-1] = -1\n new_scaffold.append(new_bin)\n new_scaffolds[name] = copy.deepcopy(new_scaffold)\n\n else:\n new_scaffolds[name] = copy.deepcopy(scaffold)\n\n return new_scaffolds", "response": "Inversion of the inversion of each bin in the given scaffolds."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef rearrange_intra_scaffolds(scaffolds):\n\n \"\"\"Rearranges all bins within each scaffold such that all bins belonging\n to the same initial contig are grouped together in the same order. 
When\n two such groups are found, the smaller one is moved to the larger one.\n \"\"\"\n\n scaffolds = format_info_frags(scaffolds)\n new_scaffolds = {}\n\n ordering = dict()\n\n for name, scaffold in scaffolds.items():\n new_scaffold = []\n\n ordering = dict()\n order = 0\n\n my_blocks = []\n\n for _, my_block in itertools.groupby(scaffold, operator.itemgetter(0)):\n\n my_bins = list(my_block)\n my_blocks.append(my_bins)\n block_length = len(my_bins)\n block_name = my_bins[0][0]\n\n if block_name in ordering.keys():\n if block_length > ordering[block_name][1]:\n ordering[block_name] = (order, block_length)\n else:\n ordering[block_name] = (order, block_length)\n order += 1\n\n def block_order(block):\n return ordering[block[0][0]][0]\n\n for my_block in sorted(my_blocks, key=block_order):\n for my_bin in my_block:\n new_scaffold.append(my_bin)\n\n new_scaffolds[name] = copy.deepcopy(new_scaffold)\n\n return new_scaffolds", "response": "Rearrange all bins within each scaffold in the same order."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nconvert an info_frags. 
txt file into a fasta file given a reference.", "response": "def write_fasta(\n init_fasta, info_frags, output=DEFAULT_NEW_GENOME_NAME, junction=False\n):\n\n \"\"\"Convert an info_frags.txt file into a fasta file given a reference.\n Optionally adds junction sequences to reflect the possibly missing base\n pairs between two newly joined scaffolds.\n \"\"\"\n\n init_genome = {\n record.id: record.seq for record in SeqIO.parse(init_fasta, \"fasta\")\n }\n my_new_records = []\n with open(info_frags, \"r\") as info_frags_handle:\n current_seq = \"\"\n current_id = None\n previous_contig = None\n for line in info_frags_handle:\n if line.startswith(\">\"):\n previous_contig = None\n if current_id is not None:\n new_record = SeqRecord(\n current_seq, id=current_id, description=\"\"\n )\n my_new_records.append(new_record)\n current_seq = \"\"\n current_id = str(line[1:])\n elif line.startswith(\"init_contig\"):\n previous_contig = None\n else:\n (init_contig, _, orientation, pos_start, pos_end) = str(\n line[:-1]\n ).split(\"\\t\")\n\n start = int(pos_start)\n end = int(pos_end)\n ori = int(orientation)\n\n assert start < end\n assert ori in {-1, 1}\n\n seq_to_add = init_genome[init_contig][start:end]\n if ori == 1:\n current_seq += seq_to_add\n elif ori == -1:\n current_seq += seq_to_add.reverse_complement()\n\n if junction and previous_contig not in {init_contig, None}:\n error_was_raised = False\n try:\n extra_seq = Seq(junction, IUPAC.ambiguous_dna)\n current_seq = extra_seq + current_seq\n except TypeError:\n if not error_was_raised:\n print(\"Invalid junction sequence\")\n error_was_raised = True\n previous_contig = init_contig\n\n new_record = SeqRecord(current_seq, id=current_id, description=\"\")\n my_new_records.append(new_record)\n SeqIO.write(my_new_records, output, \"fasta\")"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncheck if a bin list contains exclusively consecutive bin ids.", "response": "def 
is_block(bin_list):\n\n \"\"\"Check if a bin list has exclusively consecutive bin ids.\n \"\"\"\n id_set = set((my_bin[1] for my_bin in bin_list))\n start_id, end_id = min(id_set), max(id_set)\n return id_set == set(range(start_id, end_id + 1))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncalculating the internal to external gradiant", "response": "def internal2external_grad(xi, bounds):\n \"\"\" \n Calculate the internal to external gradiant\n \n Calculates the partial of external over internal\n \n \"\"\"\n\n ge = np.empty_like(xi)\n\n for i, (v, bound) in enumerate(zip(xi, bounds)):\n\n a = bound[0] # minimum\n b = bound[1] # maximum\n\n if a == None and b == None: # No constraints\n ge[i] = 1.0\n\n elif b == None: # only min\n ge[i] = v / np.sqrt(v ** 2 + 1)\n\n elif a == None: # only max\n ge[i] = -v / np.sqrt(v ** 2 + 1)\n\n else: # both min and max\n ge[i] = (b - a) * np.cos(v) / 2.\n\n return ge"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nconverts a series of internal variables to external variables", "response": "def internal2external(xi, bounds):\n \"\"\" Convert a series of internal variables to external variables\"\"\"\n\n xe = np.empty_like(xi)\n\n for i, (v, bound) in enumerate(zip(xi, bounds)):\n\n a = bound[0] # minimum\n b = bound[1] # maximum\n\n if a == None and b == None: # No constraints\n xe[i] = v\n\n elif b == None: # only min\n xe[i] = a - 1. + np.sqrt(v ** 2. + 1.)\n\n elif a == None: # only max\n xe[i] = b + 1. - np.sqrt(v ** 2. + 1.)\n\n else: # both min and max\n xe[i] = a + ((b - a) / 2.) 
* (np.sin(v) + 1.)\n\n return xe"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef external2internal(xe, bounds):\n\n xi = np.empty_like(xe)\n\n for i, (v, bound) in enumerate(zip(xe, bounds)):\n\n a = bound[0] # minimum\n b = bound[1] # maximum\n\n if a == None and b == None: # No constraints\n xi[i] = v\n\n elif b == None: # only min\n xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)\n\n elif a == None: # only max\n xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)\n\n else: # both min and max\n xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)\n\n return xi", "response": "Convert a series of external variables to internal variables"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncalculates cov_x from fjac ipvt and p as is done in leastsq", "response": "def calc_cov_x(infodic, p):\n \"\"\"\n Calculate cov_x from fjac, ipvt and p as is done in leastsq\n \"\"\"\n\n fjac = infodic[\"fjac\"]\n ipvt = infodic[\"ipvt\"]\n n = len(p)\n\n # adapted from leastsq function in scipy/optimize/minpack.py\n perm = np.take(np.eye(n), ipvt - 1, 0)\n r = np.triu(np.transpose(fjac)[:n, :])\n R = np.dot(r, perm)\n try:\n cov_x = np.linalg.inv(np.dot(np.transpose(R), R))\n except LinAlgError:\n cov_x = None\n return cov_x"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nminimizes the sum of squares of a given function on a given set of variables.", "response": "def leastsqbound(func, x0, bounds, args=(), **kw):\n \"\"\"\n Constrained multivariant Levenberg-Marquard optimization\n\n Minimize the sum of squares of a given function using the \n Levenberg-Marquard algorithm. 
Contraints on parameters are inforced using \n variable transformations as described in the MINUIT User's Guide by\n Fred James and Matthias Winkler.\n\n Parameters:\n\n * func functions to call for optimization.\n * x0 Starting estimate for the minimization.\n * bounds (min,max) pair for each element of x, defining the bounds on\n that parameter. Use None for one of min or max when there is\n no bound in that direction.\n * args Any extra arguments to func are places in this tuple.\n\n Returns: (x,{cov_x,infodict,mesg},ier)\n\n Return is described in the scipy.optimize.leastsq function. x and con_v \n are corrected to take into account the parameter transformation, infodic \n is not corrected.\n\n Additional keyword arguments are passed directly to the \n scipy.optimize.leastsq algorithm. \n\n \"\"\"\n # check for full output\n if \"full_output\" in kw and kw[\"full_output\"]:\n full = True\n else:\n full = False\n\n # convert x0 to internal variables\n i0 = external2internal(x0, bounds)\n\n # perfrom unconstrained optimization using internal variables\n r = leastsq(err, i0, args=(bounds, func, args), **kw)\n\n # unpack return convert to external variables and return\n if full:\n xi, cov_xi, infodic, mesg, ier = r\n xe = internal2external(xi, bounds)\n cov_xe = i2e_cov_x(xi, bounds, cov_xi)\n # XXX correct infodic 'fjac','ipvt', and 'qtf'\n return xe, cov_xe, infodic, mesg, ier\n\n else:\n xi, ier = r\n xe = internal2external(xi, bounds)\n return xe, ier"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef start_review(self):\n if self.set_status:\n self.github_repo.create_status(\n state=\"pending\",\n description=\"Static analysis in progress.\",\n context=\"inline-plz\",\n sha=self.last_sha,\n )", "response": "Mark our review as started."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmarks our review as finished.", "response": "def finish_review(self, success=True, 
error=False):\n \"\"\"Mark our review as finished.\"\"\"\n if self.set_status:\n if error:\n self.github_repo.create_status(\n state=\"error\",\n description=\"Static analysis error! inline-plz failed to run.\",\n context=\"inline-plz\",\n sha=self.last_sha,\n )\n elif success:\n self.github_repo.create_status(\n state=\"success\",\n description=\"Static analysis complete! No errors found in your PR.\",\n context=\"inline-plz\",\n sha=self.last_sha,\n )\n else:\n self.github_repo.create_status(\n state=\"failure\",\n description=\"Static analysis complete! Found errors in your PR.\",\n context=\"inline-plz\",\n sha=self.last_sha,\n )"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef out_of_date(self):\n try:\n latest_remote_sha = self.pr_commits(self.pull_request.refresh(True))[-1].sha\n print(\"Latest remote sha: {}\".format(latest_remote_sha))\n try:\n print(\"Ratelimit remaining: {}\".format(self.github.ratelimit_remaining))\n except Exception:\n print(\"Failed to look up ratelimit remaining\")\n return self.last_sha != latest_remote_sha\n except IndexError:\n return False", "response": "Check if our local latest sha matches the remote latest sha"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncalculate the position within the PR which is not the line number", "response": "def position(self, message):\n \"\"\"Calculate position within the PR, which is not the line number\"\"\"\n if not message.line_number:\n message.line_number = 1\n for patched_file in self.patch:\n target = patched_file.target_file.lstrip(\"b/\")\n if target == message.path:\n offset = 1\n for hunk in patched_file:\n for position, hunk_line in enumerate(hunk):\n if hunk_line.target_line_no == message.line_number:\n if not hunk_line.is_added:\n # if the line isn't an added line, we don't want to comment on it\n return\n\n return position + offset\n\n offset += len(hunk) + 1"} {"SOURCE": "codesearchnet", "instruction": 
"Given the following Python 3 function, write the documentation\ndef build_and_filter(base_folder, size_pyramid, factor, thresh_factor=1):\n\n min_bin_per_contig = 1\n fact_sub_sampling = factor\n\n all_pyramid_folder = os.path.join(base_folder, \"pyramids\")\n if not (os.path.exists(all_pyramid_folder)):\n os.mkdir(all_pyramid_folder)\n init_pyramid_folder = os.path.join(\n all_pyramid_folder, \"pyramid_\" + str(1) + \"_no_thresh\"\n )\n if not (os.path.exists(init_pyramid_folder)):\n init_size_pyramid = 1\n build(base_folder, init_size_pyramid, factor, min_bin_per_contig)\n init_pyramid_folder_level_0 = os.path.join(init_pyramid_folder, \"level_0\")\n contig_info = os.path.join(\n init_pyramid_folder_level_0, \"0_contig_info.txt\"\n )\n fragments_list = os.path.join(\n init_pyramid_folder_level_0, \"0_fragments_list.txt\"\n )\n init_abs_fragments_contacts = os.path.join(\n init_pyramid_folder_level_0, \"0_abs_frag_contacts.txt\"\n )\n\n init_pyramid_file = os.path.join(init_pyramid_folder, \"pyramid.hdf5\")\n\n pyramid_folder = os.path.join(\n all_pyramid_folder, \"pyramid_\" + str(size_pyramid) + \"_thresh_auto\"\n )\n if not (os.path.exists(pyramid_folder)):\n os.mkdir(pyramid_folder)\n level = 0\n pyramid_level_folder = os.path.join(pyramid_folder, \"level_\" + str(level))\n if not (os.path.exists(pyramid_level_folder)):\n os.mkdir(pyramid_level_folder)\n\n current_contig_info = os.path.join(\n pyramid_level_folder, str(level) + \"_contig_info.txt\"\n )\n current_frag_list = os.path.join(\n pyramid_level_folder, str(level) + \"_fragments_list.txt\"\n )\n current_abs_fragments_contacts = os.path.join(\n pyramid_level_folder, str(level) + \"_abs_frag_contacts.txt\"\n )\n if not (\n os.path.exists(current_contig_info)\n and os.path.exists(current_frag_list)\n and os.path.exists(current_abs_fragments_contacts)\n ):\n logger.info(\"start filtering\")\n pyramid_0 = h5py.File(init_pyramid_file)\n\n remove_problematic_fragments(\n contig_info,\n fragments_list,\n 
init_abs_fragments_contacts,\n current_contig_info,\n current_frag_list,\n current_abs_fragments_contacts,\n pyramid_0,\n thresh_factor=thresh_factor,\n )\n pyramid_0.close()\n #\n else:\n logger.info(\"filtering already done...\")\n\n hdf5_pyramid_file = os.path.join(pyramid_folder, \"pyramid.hdf5\")\n\n pyramid_handle = h5py.File(hdf5_pyramid_file)\n\n pyramid_level_folder = os.path.join(pyramid_folder, \"level_\" + str(level))\n level_pyramid = str(level) + \"_\"\n sub_2_super_frag_index_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"sub_2_super_index_frag.txt\"\n )\n for level in range(0, size_pyramid):\n pyramid_level_folder = os.path.join(\n pyramid_folder, \"level_\" + str(level)\n )\n if not (os.path.exists(pyramid_level_folder)):\n os.mkdir(pyramid_level_folder)\n level_pyramid = str(level) + \"_\"\n new_contig_list_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"contig_info.txt\"\n )\n new_fragments_list_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"fragments_list.txt\"\n )\n new_abs_fragments_contacts_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"abs_frag_contacts.txt\"\n )\n\n if level > 0:\n if (\n os.path.exists(new_contig_list_file)\n and os.path.exists(new_fragments_list_file)\n and os.path.exists(new_abs_fragments_contacts_file)\n and os.path.exists((sub_2_super_frag_index_file))\n ):\n logger.info(\"level already built\")\n nfrags = file_len(new_fragments_list_file) - 1\n else: # this should never append !!!\n logger.info(\"writing new_files..\")\n nfrags = subsample_data_set(\n current_contig_info,\n current_frag_list,\n fact_sub_sampling,\n current_abs_fragments_contacts,\n new_abs_fragments_contacts_file,\n min_bin_per_contig,\n new_contig_list_file,\n new_fragments_list_file,\n sub_2_super_frag_index_file,\n )\n else:\n if (\n os.path.exists(new_contig_list_file)\n and os.path.exists(new_fragments_list_file)\n and os.path.exists(new_abs_fragments_contacts_file)\n ):\n 
logger.info(\"level already built...\")\n nfrags = file_len(new_fragments_list_file) - 1\n\n try:\n status = pyramid_handle.attrs[str(level)] == \"done\"\n except KeyError:\n pyramid_handle.attrs[str(level)] = \"pending\"\n status = False\n if not (status):\n logger.info(\"Start filling the pyramid\")\n # level_to_fill = pyramid_handle.create_dataset(str(level),\n # (nfrags,nfrags), 'i')\n # fill_pyramid_level(level_to_fill,new_abs_fragments_contacts_file,\n # size_chunk,nfrags)\n fill_sparse_pyramid_level(\n pyramid_handle, level, new_abs_fragments_contacts_file, nfrags\n )\n pyramid_handle.attrs[str(level)] = \"done\"\n\n current_frag_list = new_fragments_list_file\n current_contig_info = new_contig_list_file\n current_abs_fragments_contacts = new_abs_fragments_contacts_file\n sub_2_super_frag_index_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"sub_2_super_index_frag.txt\"\n )\n logger.info(\"pyramid built.\")\n\n obj_pyramid = pyramid(pyramid_folder, size_pyramid)\n pyramid_handle.close()\n return obj_pyramid", "response": "Build a filtered pyramid of contact maps and remove high coverage and short fragments."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nbuild a single - scale fragment pyramid of contact maps.", "response": "def build(base_folder, size_pyramid, factor, min_bin_per_contig):\n \"\"\"Build a pyramid of contact maps\n\n Build a fragment pyramid for multi-scale analysis\n \n Parameters\n ----------\n base_folder : str or pathlib.Path\n Where to create the hdf5 files containing the matrices.\n size_pyramid : int\n How many levels (contact maps of decreasing resolution) to generate.\n factor : int\n Subsampling factor (binning) from one level to the next.\n min_bin_per_contig : int\n The minimum number of bins per contig below which binning shall not be\n performed.\n \"\"\"\n\n fact_sub_sampling = factor\n contig_info = os.path.join(base_folder, \"info_contigs.txt\")\n fragments_list = 
os.path.join(base_folder, \"fragments_list.txt\")\n init_abs_fragments_contacts = os.path.join(\n base_folder, \"abs_fragments_contacts_weighted.txt\"\n )\n all_pyramid_folder = os.path.join(base_folder, \"pyramids\")\n pyramid_folder = os.path.join(\n all_pyramid_folder, \"pyramid_\" + str(size_pyramid) + \"_no_thresh\"\n )\n\n if not (os.path.exists(all_pyramid_folder)):\n os.mkdir(all_pyramid_folder)\n\n if not (os.path.exists(pyramid_folder)):\n os.mkdir(pyramid_folder)\n\n hdf5_pyramid_file = os.path.join(pyramid_folder, \"pyramid.hdf5\")\n pyramid_handle = h5py.File(hdf5_pyramid_file)\n level = 0\n pyramid_level_folder = os.path.join(pyramid_folder, \"level_\" + str(level))\n if not (os.path.exists(pyramid_level_folder)):\n os.mkdir(pyramid_level_folder)\n\n current_contig_info = os.path.join(\n pyramid_level_folder, str(level) + \"_contig_info.txt\"\n )\n current_frag_list = os.path.join(\n pyramid_level_folder, str(level) + \"_fragments_list.txt\"\n )\n current_abs_fragments_contacts = os.path.join(\n pyramid_level_folder, str(level) + \"_abs_frag_contacts.txt\"\n )\n for level in range(0, size_pyramid):\n pyramid_level_folder = os.path.join(\n pyramid_folder, \"level_\" + str(level)\n )\n if not (os.path.exists(pyramid_level_folder)):\n os.mkdir(pyramid_level_folder)\n level_pyramid = str(level) + \"_\"\n if level == 0:\n shutil.copyfile(contig_info, current_contig_info)\n shutil.copyfile(\n init_abs_fragments_contacts, current_abs_fragments_contacts\n )\n nfrags = init_frag_list(fragments_list, current_frag_list)\n new_abs_fragments_contacts_file = current_abs_fragments_contacts\n new_contig_list_file = current_contig_info\n new_fragments_list_file = current_frag_list\n sub_2_super_frag_index_file = os.path.join(\n pyramid_level_folder,\n level_pyramid + \"sub_2_super_index_frag.txt\",\n )\n\n else:\n\n new_contig_list_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"contig_info.txt\"\n )\n new_fragments_list_file = os.path.join(\n 
pyramid_level_folder, level_pyramid + \"fragments_list.txt\"\n )\n new_abs_fragments_contacts_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"abs_frag_contacts.txt\"\n )\n if (\n os.path.exists(new_contig_list_file)\n and os.path.exists(new_fragments_list_file)\n and os.path.exists(new_abs_fragments_contacts_file)\n and os.path.exists(sub_2_super_frag_index_file)\n ):\n logger.info(\"level already built...\")\n nfrags = file_len(new_fragments_list_file) - 1\n else:\n logger.info(\"writing new_files..\")\n nfrags = subsample_data_set(\n current_contig_info,\n current_frag_list,\n fact_sub_sampling,\n current_abs_fragments_contacts,\n new_abs_fragments_contacts_file,\n min_bin_per_contig,\n new_contig_list_file,\n new_fragments_list_file,\n sub_2_super_frag_index_file,\n )\n\n try:\n status = pyramid_handle.attrs[str(level)] == \"done\"\n except KeyError:\n pyramid_handle.attrs[str(level)] = \"pending\"\n status = False\n if not (status):\n logger.info(\"Start filling the pyramid\")\n fill_sparse_pyramid_level(\n pyramid_handle, level, new_abs_fragments_contacts_file, nfrags\n )\n pyramid_handle.attrs[str(level)] = \"done\"\n current_frag_list = new_fragments_list_file\n current_contig_info = new_contig_list_file\n current_abs_fragments_contacts = new_abs_fragments_contacts_file\n sub_2_super_frag_index_file = os.path.join(\n pyramid_level_folder, level_pyramid + \"sub_2_super_index_frag.txt\"\n )\n logger.info(\"pyramid built.\")\n pyramid_handle.close()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nconvert contact maps between old - style and new - style formats.", "response": "def abs_contact_2_coo_file(abs_contact_file, coo_file):\n \"\"\"Convert contact maps between old-style and new-style formats.\n\n A legacy function that converts contact maps from the older GRAAL format to\n the simpler instaGRAAL format. 
This is useful with datasets generated by\n Hi-C box.\n\n Parameters\n ----------\n abs_contact_file : str, file or pathlib.Path\n The input old-style contact map.\n coo_file : str, file, or pathlib.Path\n The output path to the generated contact map; must be writable.\n \"\"\"\n\n sparse_dict = dict()\n h = open(abs_contact_file, \"r\")\n all_lines = h.readlines()\n n_lines = len(all_lines)\n for i in range(1, n_lines):\n\n line = all_lines[i]\n dat = line.split()\n mates = [int(dat[0]), int(dat[1])]\n mates.sort()\n f1 = mates[0] - 1\n f2 = mates[1] - 1\n if f1 in sparse_dict:\n if f2 in sparse_dict[f1]:\n sparse_dict[f1][f2] += 1\n else:\n sparse_dict[f1][f2] = 1\n else:\n sparse_dict[f1] = dict()\n sparse_dict[f1][f2] = 1\n\n keys = list(sparse_dict.keys())\n keys.sort()\n h.close()\n h_coo = open(coo_file, \"w\")\n h_coo.write(\"%s\\t%s\\t%s\\n\" % (\"id_frag_a\", \"id_frag_b\", \"n_contact\"))\n for fa in keys:\n d_fb = sparse_dict[fa]\n keys_b = list(d_fb.keys())\n keys_b.sort()\n for fb in keys_b:\n nc = d_fb[fb]\n h_coo.write(\"%s\\t%s\\t%s\\n\" % (str(fa), str(fb), str(nc)))\n h_coo.close()\n h.close()"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nfilling a level with sparse contact map data from the simple text matrix file to the hdf5 - based pyramid level.", "response": "def fill_sparse_pyramid_level(pyramid_handle, level, contact_file, nfrags):\n \"\"\"Fill a level with sparse contact map data\n\n Fill values from the simple text matrix file to the hdf5-based pyramid\n level with contact data.\n \n Parameters\n ----------\n pyramid_handle : h5py.File\n The hdf5 file handle containing the whole dataset.\n level : int\n The level (resolution) to be filled with contact data.\n contact_file : str, file or pathlib.Path\n The binned contact map file to be converted to hdf5 data.\n nfrags : int\n The number of fragments/bins in that specific level.\n \"\"\"\n\n sparse_dict = dict()\n h = open(contact_file, \"r\")\n all_lines = 
h.readlines()\n n_lines = len(all_lines)\n for i in range(1, n_lines):\n\n line = all_lines[i]\n dat = line.split()\n mates = [int(dat[0]), int(dat[1])]\n nc = int(dat[2])\n mates.sort()\n f1 = mates[0]\n f2 = mates[1]\n\n if f1 in sparse_dict:\n if f2 in sparse_dict[f1]:\n sparse_dict[f1][f2] += nc\n else:\n sparse_dict[f1][f2] = nc\n else:\n sparse_dict[f1] = dict()\n sparse_dict[f1][f2] = nc\n\n keys = list(sparse_dict.keys())\n keys.sort()\n\n out_r = []\n out_c = []\n out_d = []\n\n for r in keys:\n data = sparse_dict[r]\n for c in list(data.keys()):\n out_r.append(r)\n out_c.append(c)\n out_d.append(data[c])\n\n n_on_pxls = len(out_d)\n level_hdf5 = pyramid_handle.create_group(str(level))\n data_2_sparse = level_hdf5.create_dataset(\"data\", (3, n_on_pxls), \"i\")\n data_nfrags = level_hdf5.create_dataset(\"nfrags\", (1, 1), \"i\")\n np_csr = np.zeros((3, n_on_pxls), dtype=np.int32)\n np_csr[0, :] = out_r\n np_csr[1, :] = out_c\n np_csr[2, :] = out_d\n data_2_sparse[0, :] = out_r\n data_2_sparse[1, :] = out_c\n data_2_sparse[2, :] = out_d\n data_nfrags[:] = nfrags"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef init_frag_list(fragment_list, new_frag_list):\n\n handle_frag_list = open(fragment_list, \"r\")\n handle_new_frag_list = open(new_frag_list, \"w\")\n handle_new_frag_list.write(\n \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (\n \"id\",\n \"chrom\",\n \"start_pos\",\n \"end_pos\",\n \"size\",\n \"gc_content\",\n \"accu_frag\",\n \"frag_start\",\n \"frag_end\",\n )\n )\n handle_frag_list.readline()\n i = 0\n while 1:\n line_frag = handle_frag_list.readline()\n if not line_frag:\n handle_frag_list.close()\n handle_new_frag_list.close()\n break\n i += 1\n data = line_frag.split(\"\\t\")\n id_init = data[0]\n contig_name = data[1]\n start_pos = data[2]\n end_pos = data[3]\n length_kb = data[4]\n gc_content = str(float(data[5]))\n accu_frag = str(1)\n frag_start = id_init\n 
frag_end = id_init\n handle_new_frag_list.write(\n \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (\n id_init,\n contig_name,\n start_pos,\n end_pos,\n length_kb,\n gc_content,\n accu_frag,\n frag_start,\n frag_end,\n )\n )\n\n return i", "response": "Adapt the original fragment list to fit the build function requirements\n "} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef zoom_in_frag(self, curr_frag):\n level = curr_frag[1]\n frag = curr_frag[0]\n output = []\n if level > 0:\n str_level = str(level)\n sub_low = self.spec_level[str_level][\"fragments_dict\"][frag][\n \"sub_low_index\"\n ]\n sub_high = self.spec_level[str_level][\"fragments_dict\"][frag][\n \"sub_high_index\"\n ]\n new_level = level - 1\n for i in range(sub_low, sub_high + 1):\n output.append((i, new_level))\n else:\n output.append(curr_frag)\n return output", "response": "zooms in the frag list to the level that the curr_frag is in."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef zoom_out_frag(self, curr_frag):\n level = curr_frag[1]\n frag = curr_frag[0]\n output = []\n if level > 0:\n str_level = str(level)\n high_frag = self.spec_level[str_level][\"fragments_dict\"][frag][\n \"super_index\"\n ]\n new_level = level + 1\n output = (high_frag, new_level)\n else:\n output = curr_frag\n return output", "response": "zoom out the frag in the spec_level"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef zoom_in_pixel(self, curr_pixel):\n low_frag = curr_pixel[0]\n high_frag = curr_pixel[1]\n level = curr_pixel[2]\n if level > 0:\n str_level = str(level)\n low_sub_low = self.spec_level[str_level][\"fragments_dict\"][\n low_frag\n ][\"sub_low_index\"]\n low_sub_high = self.spec_level[str_level][\"fragments_dict\"][\n low_frag\n ][\"sub_high_index\"]\n high_sub_low = 
self.spec_level[str_level][\"fragments_dict\"][\n high_frag\n ][\"sub_low_index\"]\n high_sub_high = self.spec_level[str_level][\"fragments_dict\"][\n high_frag\n ][\"sub_high_index\"]\n vect = [low_sub_low, low_sub_high, high_sub_low, high_sub_high]\n new_pix_low = min(vect)\n new_pix_high = max(vect)\n new_level = level - 1\n new_pixel = [new_pix_low, new_pix_high, new_level]\n else:\n new_pixel = curr_pixel\n return new_pixel", "response": "return the curr_frag at a higher resolution"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef zoom_out_pixel(self, curr_pixel):\n low_frag = curr_pixel[0]\n high_frag = curr_pixel[1]\n level = curr_pixel[2]\n str_level = str(level)\n if level < self.n_level - 1:\n low_super = self.spec_level[str_level][\"fragments_dict\"][low_frag][\n \"super_index\"\n ]\n high_super = self.spec_level[str_level][\"fragments_dict\"][\n high_frag\n ][\"sub_index\"]\n\n new_pix_low = min([low_super, high_super])\n new_pix_high = max([low_super, high_super])\n new_level = level + 1\n new_pixel = [new_pix_low, new_pix_high, new_level]\n else:\n new_pixel = curr_pixel\n return new_pixel", "response": "return the curr_frag at a lower resolution"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef load_data(self, pyramid):\n import colorsys\n\n logger.info(\"loading data from level = {}\".format(self.level))\n # self.im_init = np.array(pyramid.data[str(self.level)],\n # dtype=np.int32)\n # self.n_frags = self.im_init.shape[0]\n\n # start loading sparse matrix #####\n self.n_frags = int(np.copy(pyramid.data[str(self.level)][\"nfrags\"][0]))\n self.np_2_scipy_sparse = np.copy(pyramid.data[str(self.level)][\"data\"])\n self.sparse_mat_csr = sp.csr_matrix(\n (self.np_2_scipy_sparse[2, :], self.np_2_scipy_sparse[0:2, :]),\n shape=(self.n_frags, self.n_frags),\n )\n self.sparse_mat_csc = sp.csc_matrix(\n (self.np_2_scipy_sparse[2, 
:], self.np_2_scipy_sparse[0:2, :]),\n shape=(self.n_frags, self.n_frags),\n )\n # end loading sparse matrix #####\n\n int_n_frags = np.int32(self.n_frags)\n self.pos_vect_frags_4_GL = np.ndarray(\n (int_n_frags, 4), dtype=np.float32\n )\n self.col_vect_frags_4_GL = np.ndarray(\n (int_n_frags, 4), dtype=np.float32\n )\n\n self.dict_contigs = dict()\n ContFrags = pyramid.spec_level[str(self.level)][\"contigs_dict\"]\n if str(self.level - 1) in pyramid.spec_level:\n subContFrags = pyramid.spec_level[str(self.level - 1)][\n \"contigs_dict\"\n ]\n else:\n subContFrags = ContFrags\n coord_cont = dict()\n\n n_contigs = len(list(ContFrags.keys()))\n # print \"n init contigs = \", n_contigs\n HSV_tuples = [\n (x * 2.5 / n_contigs, 0.5, 0.5) for x in range(n_contigs)\n ]\n RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]\n self.distri_frag = []\n\n # list_contigs = ContFrags.keys()\n # print \"Cont frags keys = \", ContFrags.keys()\n list_contigs_id = pyramid.list_contigs_id\n # debugggg\n # list_contigs.sort()\n\n for i in range(0, self.n_frags):\n self.frags_init_contigs.append(\"\")\n\n for id_cont in list_contigs_id:\n self.dict_contigs[id_cont] = dict()\n cont_frag = ContFrags[id_cont]\n sub_cont_frag = subContFrags[id_cont]\n coord_cont[id_cont] = []\n name_contig = cont_frag[0].init_contig\n tick_kb = []\n end_frags_kb = []\n\n id_f_prev = -1\n lim_curr_cont = len(cont_frag)\n sub_lim_curr_cont = len(sub_cont_frag)\n len_curr_contig_bp = 0\n curr_sub_pos = 0\n for id_f in range(0, lim_curr_cont):\n f = cont_frag[id_f]\n lkb = f.length_kb\n len_curr_contig_bp += lkb\n for id_f in range(0, lim_curr_cont):\n f = cont_frag[id_f]\n lkb = f.length_kb\n self.distri_frag.append(lkb)\n start = f.start_pos\n end = f.end_pos\n pos_kb = start + lkb / 2.0\n tick_kb.append(pos_kb)\n end_frags_kb.append(end)\n id_f_curr = f.np_id_abs - 1\n n_accu_frags = f.n_accu_frags\n n_sub_frags = f.sub_frag_end - f.sub_frag_start + 1\n\n self.frags_init_contigs[id_f_curr] = 
f.init_contig\n\n if id_f < lim_curr_cont - 1:\n id_f_next = cont_frag[id_f + 1].np_id_abs - 1\n else:\n id_f_next = -1\n f_T_frag = (\n np.int32(f.curr_id - 1),\n np.int32(f.contig_id),\n np.int32(start),\n np.int32(lkb),\n np.int32(0),\n np.int32(id_f_curr),\n np.int32(id_f_prev),\n np.int32(id_f_next),\n np.int32(lim_curr_cont),\n np.int32(len_curr_contig_bp),\n np.int32(n_accu_frags),\n )\n self.pos_vect_frags_4_GL[id_f_curr, 0] = (\n np.float32(f.curr_id - 1) / 100.0\n )\n self.pos_vect_frags_4_GL[id_f_curr, 1] = (\n np.float32(f.contig_id) / 100.0\n )\n # self.pos_vect_frags_4_GL[id_f_curr, 2] = np.float32(0.0)\n self.pos_vect_frags_4_GL[id_f_curr, 2] = 0.0\n self.pos_vect_frags_4_GL[id_f_curr, 3] = np.float32(1.0)\n # GL_pos_frag = (np.float32(f.curr_id - 1),\n # np.float32(f.contig_id),\n # np.float32(lim_curr_cont),\n # np.float32(0.0))\n # self.col_vect_frags_4_GL[id_f_curr,0] = np.float32(0.)\n # self.col_vect_frags_4_GL[id_f_curr,1] = np.float32(1.)\n # self.col_vect_frags_4_GL[id_f_curr,2] = np.float32(0.)\n # self.col_vect_frags_4_GL[id_f_curr,3] = np.float32(1.)\n self.col_vect_frags_4_GL[id_f_curr, 0] = np.float32(\n RGB_tuples[id_cont - 1][0]\n )\n self.col_vect_frags_4_GL[id_f_curr, 1] = np.float32(\n RGB_tuples[id_cont - 1][1]\n )\n self.col_vect_frags_4_GL[id_f_curr, 2] = np.float32(\n RGB_tuples[id_cont - 1][2]\n )\n self.col_vect_frags_4_GL[id_f_curr, 3] = np.float32(1.0)\n\n # GL_col_frag = (np.float32(RGB_tuples[id_cont - 1][0],\n # np.float32(RGB_tuples[id_cont - 1][1]),\n # np.float32(RGB_tuples[id_cont - 1][2]),\n # np.float32(1.0))\n self.S_o_A_frags[\"pos\"].append(f.curr_id - 1)\n self.S_o_A_frags[\"sub_pos\"].append(curr_sub_pos) # TO DO !!!!\n self.S_o_A_frags[\"id_c\"].append(f.contig_id)\n self.S_o_A_frags[\"start_bp\"].append(start)\n self.S_o_A_frags[\"len_bp\"].append(lkb)\n self.S_o_A_frags[\"sub_len\"].append(n_sub_frags)\n self.S_o_A_frags[\"circ\"].append(0)\n self.S_o_A_frags[\"id\"].append(id_f_curr)\n 
self.S_o_A_frags[\"prev\"].append(id_f_prev)\n self.S_o_A_frags[\"next\"].append(id_f_next)\n self.S_o_A_frags[\"l_cont\"].append(lim_curr_cont)\n self.S_o_A_frags[\"sub_l_cont\"].append(sub_lim_curr_cont)\n self.S_o_A_frags[\"l_cont_bp\"].append(len_curr_contig_bp)\n self.S_o_A_frags[\"n_accu\"].append(n_accu_frags)\n\n id_f_prev = id_f_curr\n self.vect_frag_np.append(f_T_frag)\n curr_sub_pos += n_sub_frags\n\n # self.pos_vect_frags_4_GL.append(GL_pos_frag)\n # self.col_vect_frags_4_GL.append(GL_col_frag)\n for f in cont_frag:\n coord_cont[id_cont].append(f.np_id_abs - 1)\n self.dict_contigs[id_cont][\"intra_coord\"] = coord_cont[id_cont]\n self.dict_contigs[id_cont][\"frags\"] = cont_frag\n self.dict_contigs[id_cont][\"name\"] = name_contig\n self.dict_contigs[id_cont][\"tick_kb\"] = np.array(tick_kb)\n self.dict_contigs[id_cont][\"end_frags_kb\"] = np.array(end_frags_kb)\n #######################\n\n self.vect_frag_np = np.array(self.vect_frag_np, dtype=self.T_frag)\n\n # self.pos_vect_frags_4_GL = np.array(self.pos_vect_frags_4_GL,\n # dtype=self.float4) self.col_vect_frags_4_GL =\n # np.array(self.col_vect_frags_4_GL, dtype=self.float4)\n\n for key in list(self.S_o_A_frags.keys()):\n self.S_o_A_frags[key] = np.array(\n self.S_o_A_frags[key], dtype=np.int32\n )\n\n total_trans = 0\n n_tot = 0\n n_tot_intra = 0\n for id_cont in range(1, len(self.dict_contigs) + 1):\n sub_sp_mat_tmp = self.sparse_mat_csr[coord_cont[id_cont], :]\n n_full_contact = sub_sp_mat_tmp.sum()\n sub_sp_mat_tmp_csc = sub_sp_mat_tmp.tocsc()\n sub_sp_mat_tmp_intra = sub_sp_mat_tmp_csc[:, coord_cont[id_cont]]\n shape_test = sub_sp_mat_tmp_intra.shape\n test = shape_test[0] == shape_test[1]\n if not test:\n logger.info(\"MIERDA!!!!\")\n n_intra = sub_sp_mat_tmp_intra.sum()\n\n total_trans += n_full_contact\n total_trans -= n_intra\n # n_tot += (sub_sp_mat_tmp.shape[0] * sub_sp_mat_tmp.shape[1]) -\n # sub_sp_mat_tmp_intra.shape[0]*sub_sp_mat_tmp_intra.shape[1]\n n_tot_intra += shape_test[0] * 
(shape_test[0] - 1) / 2\n\n # for id_cont in xrange(1, len(self.dict_contigs)+1):\n # full = self.im_init[coord_cont[id_cont], :]\n # intra = self.im_init[np.ix_(coord_cont[id_cont],\n # coord_cont[id_cont])]\n # total_trans += full.sum()\n # total_trans -= intra.sum()\n # n_tot += (full.shape[0]*full.shape[1]) -\n # intra.shape[0]*intra.shape[1]\n n_tot = (\n self.sparse_mat_csr.shape[0]\n * (self.sparse_mat_csr.shape[0] - 1)\n / 2\n - n_tot_intra\n )\n self.mean_value_trans = total_trans / np.float32(n_tot)\n if np.isnan(self.mean_value_trans):\n self.mean_value_trans = np.amin(self.sparse_mat_csr.data) / 10.0\n logger.debug(\n \"computed mean trans value ... = {}\".format(self.mean_value_trans)\n )\n\n self.distri_frag = np.array(self.distri_frag)\n\n # self.define_inter_chrom_coord()\n # self.init_data()\n\n self.n_contigs = len(self.dict_contigs)", "response": "loads the data from the HIC file into the internal structure"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef load_config(args, config_path=\".inlineplz.yml\"):\n config = {}\n try:\n with open(config_path) as configfile:\n config = yaml.safe_load(configfile) or {}\n if config:\n print(\"Loaded config from {}\".format(config_path))\n pprint.pprint(config)\n except (IOError, OSError, yaml.parser.ParserError):\n traceback.print_exc()\n args = update_from_config(args, config)\n args.ignore_paths = args.__dict__.get(\"ignore_paths\") or [\n \"node_modules\",\n \".git\",\n \".tox\",\n \"godeps\",\n \"vendor\",\n \"site-packages\",\n \"venv\",\n \".env\",\n \"spec\",\n \"migrate\",\n \"bin\",\n \"fixtures\",\n \"cassettes\",\n \".cache\",\n \".idea\",\n \".pytest_cache\",\n \"__pycache__\",\n \"dist\",\n ]\n if config_path != \".inlineplz.yml\":\n return args\n\n # fall back to config_dir inlineplz yaml if we didn't find one locally\n if args.config_dir and not config:\n new_config_path = os.path.join(args.config_dir, config_path)\n if 
os.path.exists(new_config_path):\n return load_config(args, new_config_path)\n\n return args", "response": "Load inline - plz config from yaml config file with reasonable defaults."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nparses input file with the specified parser and post comments based on lint output.", "response": "def inline(args):\n \"\"\"\n Parse input file with the specified parser and post messages based on lint output\n\n :param args: Contains the following\n interface: How are we going to post comments?\n owner: Username of repo owner\n repo: Repository name\n pr: Pull request ID\n token: Authentication for repository\n url: Root URL of repository (not your project) Default: https://github.com\n dryrun: Prints instead of posting comments.\n zero_exit: If true: always return a 0 exit code.\n install: If true: install linters.\n max_comments: Maximum comments to write\n :return: Exit code. 1 if there are any comments, 0 if there are none.\n \"\"\"\n # don't load trusted value from config because we don't trust the config\n trusted = args.trusted\n args = load_config(args)\n print(\"Args:\")\n pprint.pprint(args)\n ret_code = 0\n\n # TODO: consider moving this git parsing stuff into the github interface\n url = args.url\n if args.repo_slug:\n owner = args.repo_slug.split(\"/\")[0]\n repo = args.repo_slug.split(\"/\")[1]\n else:\n owner = args.owner\n repo = args.repo\n if args.url:\n try:\n url_to_parse = args.url\n # giturlparse won't parse URLs that don't end in .git\n if not url_to_parse.endswith(\".git\"):\n url_to_parse += \".git\"\n parsed = giturlparse.parse(str(url_to_parse))\n url = parsed.resource\n if not url.startswith(\"https://\"):\n url = \"https://\" + url\n if parsed.owner:\n owner = parsed.owner\n if parsed.name:\n repo = parsed.name\n except giturlparse.parser.ParserError:\n pass\n if not args.dryrun and args.interface not in interfaces.INTERFACES:\n print(\"Valid inline-plz config 
not found\")\n return 1\n\n print(\"Using interface: {0}\".format(args.interface))\n my_interface = None\n filenames = None\n if not args.dryrun:\n my_interface = interfaces.INTERFACES[args.interface](\n owner,\n repo,\n args.pull_request,\n args.branch,\n args.token,\n url,\n args.commit,\n args.ignore_paths,\n args.prefix,\n args.autofix,\n args.set_status,\n )\n if not my_interface.is_valid():\n print(\"Invalid review. Exiting.\")\n return 0\n filenames = my_interface.filenames\n my_interface.start_review()\n try:\n linter_runner = LinterRunner(\n args.install,\n args.autorun,\n args.ignore_paths,\n args.config_dir,\n args.enabled_linters,\n args.disabled_linters,\n args.autofix,\n trusted,\n filenames,\n )\n messages = linter_runner.run_linters()\n except Exception: # pylint: disable=broad-except\n print(\"Linting failed:\\n{}\".format(traceback.format_exc()))\n print(\"inline-plz version: {}\".format(__version__))\n print(\"Python version: {}\".format(sys.version))\n ret_code = 1\n if my_interface:\n my_interface.finish_review(error=True)\n return ret_code\n\n print(\"{} lint messages found\".format(len(messages)))\n print(\"inline-plz version: {}\".format(__version__))\n print(\"Python version: {}\".format(sys.version))\n\n # TODO: implement dryrun as an interface instead of a special case here\n\n if args.dryrun:\n print_messages(messages)\n write_messages_to_json(messages)\n return ret_code\n\n try:\n if my_interface.post_messages(messages, args.max_comments):\n if not args.zero_exit:\n ret_code = 1\n if args.delete_outdated:\n my_interface.clear_outdated_messages()\n my_interface.finish_review(success=False)\n write_messages_to_json(messages)\n return ret_code\n\n if args.delete_outdated:\n my_interface.clear_outdated_messages()\n my_interface.finish_review(success=True)\n except KeyError:\n print(\"Interface not found: {}\".format(args.interface))\n traceback.print_exc()\n write_messages_to_json(messages)\n return ret_code"} {"SOURCE": "codesearchnet", 
"instruction": "Can you write a function in Python 3 where it\ndeletes standard installation directories.", "response": "def cleanup():\n \"\"\"Delete standard installation directories.\"\"\"\n for install_dir in linters.INSTALL_DIRS:\n try:\n shutil.rmtree(install_dir, ignore_errors=True)\n except Exception:\n print(\n \"{0}\\nFailed to delete {1}\".format(\n traceback.format_exc(), install_dir\n )\n )\n sys.stdout.flush()"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\napplies a JSON data object for a service", "response": "async def apply_json(self, data):\n \"\"\"Apply a JSON data object for a service\n \"\"\"\n if not isinstance(data, list):\n data = [data]\n result = []\n for entry in data:\n if not isinstance(entry, dict):\n raise KongError('dictionary required')\n ensure = entry.pop('ensure', None)\n name = entry.pop('name', None)\n routes = entry.pop('routes', [])\n plugins = entry.pop('plugins', [])\n host = entry.pop('host', None)\n if host in LOCAL_HOST:\n host = local_ip()\n if not name:\n raise KongError('Service name is required')\n if ensure in REMOVE:\n if await self.has(name):\n await self.delete(name)\n continue\n # backward compatible with config entry\n config = entry.pop('config', None)\n if isinstance(config, dict):\n entry.update(config)\n if await self.has(name):\n srv = await self.update(name, host=host, **entry)\n else:\n srv = await self.create(name=name, host=host, **entry)\n srv.data['routes'] = await srv.routes.apply_json(routes)\n srv.data['plugins'] = await srv.plugins.apply_json(plugins)\n result.append(srv.data)\n return result"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\napply a JSON data object for a service.", "response": "async def apply_json(self, data):\n \"\"\"Apply a JSON data object for a service\n \"\"\"\n if not isinstance(data, list):\n data = [data]\n result = []\n for entry in data:\n name = entry.pop('name')\n if await self.has(name):\n sni = 
await self.update(name, **entry)\n else:\n sni = await self.create(name=name, **entry)\n result.append(sni.data)\n return result"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef resize(self, size):\n self.instance.resize_volume(size)\n self.size = size", "response": "Resize the volume to the specified size."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get(self, item):\n resource = super(CloudDatabaseManager, self).get(item)\n resource.volume = CloudDatabaseVolume(resource, resource.volume)\n return resource", "response": "Returns the CloudDatabaseVolume object for the given item."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating the dict required to create a Cloud Database instance.", "response": "def _create_body(self, name, flavor=None, volume=None, databases=None,\n users=None, version=None, type=None):\n \"\"\"\n Used to create the dict required to create a Cloud Database instance.\n \"\"\"\n if flavor is None:\n flavor = 1\n flavor_ref = self.api._get_flavor_ref(flavor)\n if volume is None:\n volume = 1\n if databases is None:\n databases = []\n if users is None:\n users = []\n\n body = {\"instance\": {\n \"name\": name,\n \"flavorRef\": flavor_ref,\n \"volume\": {\"size\": volume},\n \"databases\": databases,\n \"users\": users,\n }}\n\n if type is not None or version is not None:\n required = (type, version)\n if all(required):\n body['instance']['datastore'] = {\"type\": type, \"version\": version}\n else:\n raise exc.MissingCloudDatabaseParameter(\"Specifying a datastore\"\n \" requires both the datastore type as well as the version.\")\n return body"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncreates a backup of the specified instance giving it the specified name along with an optional description.", "response": "def create_backup(self, 
instance, name, description=None):\n \"\"\"\n Creates a backup of the specified instance, giving it the specified\n name along with an optional description.\n \"\"\"\n body = {\"backup\": {\n \"instance\": utils.get_id(instance),\n \"name\": name,\n }}\n if description is not None:\n body[\"backup\"][\"description\"] = description\n uri = \"/backups\"\n resp, resp_body = self.api.method_post(uri, body=body)\n mgr = self.api._backup_manager\n return CloudDatabaseBackup(mgr, body.get(\"backup\"))"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef restore_backup(self, backup, name, flavor, volume):\n flavor_ref = self.api._get_flavor_ref(flavor)\n body = {\"instance\": {\n \"name\": name,\n \"flavorRef\": flavor_ref,\n \"volume\": {\"size\": volume},\n \"restorePoint\": {\"backupRef\": utils.get_id(backup)},\n }}\n uri = \"/%s\" % self.uri_base\n resp, resp_body = self.api.method_post(uri, body=body)\n return CloudDatabaseInstance(self, resp_body.get(\"instance\", {}))", "response": "Restores a backup to a new database instance."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef list_backups(self, instance=None, marker=0, limit=20):\n return self.api._backup_manager.list(instance=instance, limit=limit,\n marker=marker)", "response": "Returns a paginated list of backups for a particular instance."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning a list of CloudDatabaseBackup objects for the specified instance.", "response": "def _list_backups_for_instance(self, instance, marker=0, limit=20):\n \"\"\"\n Instance-specific backups are handled through the instance manager,\n not the backup manager.\n \"\"\"\n uri = \"/%s/%s/backups?limit=%d&marker=%d\" % (self.uri_base,\n utils.get_id(instance),\n int(limit),\n int(marker))\n resp, resp_body = self.api.method_get(uri)\n mgr = self.api._backup_manager\n return 
[CloudDatabaseBackup(mgr, backup)\n for backup in resp_body.get(\"backups\")]"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns a list of database names for the given dbs.", "response": "def _get_db_names(self, dbs, strict=True):\n \"\"\"\n Accepts a single db (name or object) or a list of dbs, and returns a\n list of database names. If any of the supplied dbs do not exist, a\n NoSuchDatabase exception will be raised, unless you pass strict=False.\n \"\"\"\n dbs = utils.coerce_to_list(dbs)\n db_names = [utils.get_name(db) for db in dbs]\n if strict:\n good_dbs = self.instance.list_databases()\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\n bad_names = [db_name for db_name in db_names\n if db_name not in good_names]\n if bad_names:\n bad = \", \".join(bad_names)\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\n \"found: %s\" % bad)\n return db_names"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update(self, user, name=None, password=None, host=None):\n if not any((name, password, host)):\n raise exc.MissingDBUserParameters(\"You must supply at least one of \"\n \"the following: new username, new password, or new host \"\n \"specification.\")\n if not isinstance(user, CloudDatabaseUser):\n # Must be the ID/name\n user = self.get(user)\n dct = {}\n if name and (name != user.name):\n dct[\"name\"] = name\n if host and (host != user.host):\n dct[\"host\"] = host\n if password:\n dct[\"password\"] = password\n if not dct:\n raise exc.DBUpdateUnchanged(\"You must supply at least one changed \"\n \"value when updating a user.\")\n uri = \"/%s/%s\" % (self.uri_base, user.name)\n body = {\"user\": dct}\n resp, resp_body = self.api.method_put(uri, body=body)\n return None", "response": "Updates the user s attributes."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a list of all database 
names for which the specified user has access rights.", "response": "def list_user_access(self, user):\n \"\"\"\n Returns a list of all database names for which the specified user\n has access rights.\n \"\"\"\n user = utils.get_name(user)\n uri = \"/%s/%s/databases\" % (self.uri_base, user)\n try:\n resp, resp_body = self.api.method_get(uri)\n except exc.NotFound as e:\n raise exc.NoSuchDatabaseUser(\"User '%s' does not exist.\" % user)\n dbs = resp_body.get(\"databases\", {})\n return [CloudDatabaseDatabase(self, db) for db in dbs]"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ngrant access to the databases listed in db_names to the user.", "response": "def grant_user_access(self, user, db_names, strict=True):\n \"\"\"\n Gives access to the databases listed in `db_names` to the user. You may\n pass in either a single db or a list of dbs.\n\n If any of the databases do not exist, a NoSuchDatabase exception will\n be raised, unless you specify `strict=False` in the call.\n \"\"\"\n user = utils.get_name(user)\n uri = \"/%s/%s/databases\" % (self.uri_base, user)\n db_names = self._get_db_names(db_names, strict=strict)\n dbs = [{\"name\": db_name} for db_name in db_names]\n body = {\"databases\": dbs}\n try:\n resp, resp_body = self.api.method_put(uri, body=body)\n except exc.NotFound as e:\n raise exc.NoSuchDatabaseUser(\"User '%s' does not exist.\" % user)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef revoke_user_access(self, user, db_names, strict=True):\n user = utils.get_name(user)\n db_names = self._get_db_names(db_names, strict=strict)\n bad_names = []\n for db_name in db_names:\n uri = \"/%s/%s/databases/%s\" % (self.uri_base, user, db_name)\n resp, resp_body = self.api.method_delete(uri)", "response": "Revokes access to the databases listed in db_names for the specified user."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a paginated list of 
backups for a particular instance.", "response": "def list(self, instance=None, limit=20, marker=0):\n \"\"\"\n Return a paginated list of backups, or just for a particular\n instance.\n \"\"\"\n if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\noverriding the default get method by making the volume attribute accessible into a CloudDatabaseVolume object instead of the raw dict.", "response": "def get(self):\n \"\"\"\n Need to override the default get() behavior by making the 'volume'\n attribute into a CloudDatabaseVolume object instead of the raw dict.\n \"\"\"\n super(CloudDatabaseInstance, self).get()\n # Make the volume into an accessible object instead of a dict\n self.volume = CloudDatabaseVolume(self, self.volume)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_databases(self, limit=None, marker=None):\n return self._database_manager.list(limit=limit, marker=marker)", "response": "Returns a list of the names of all databases for this instance."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_users(self, limit=None, marker=None):\n return self._user_manager.list(limit=limit, marker=marker)", "response": "Returns a list of the names of all users for this instance."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns a CloudDatabaseUser object for the specified name.", "response": "def get_user(self, name):\n \"\"\"\n Finds the user in this instance with the specified name, and\n returns a CloudDatabaseUser object. 
If no match is found, a\n NoSuchDatabaseUser exception is raised.\n \"\"\"\n try:\n return self._user_manager.get(name)\n except exc.NotFound:\n raise exc.NoSuchDatabaseUser(\"No user by the name '%s' exists.\" %\n name)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a CloudDatabaseDatabase object for the specified name.", "response": "def get_database(self, name):\n \"\"\"\n Finds the database in this instance with the specified name, and\n returns a CloudDatabaseDatabase object. If no match is found, a\n NoSuchDatabase exception is raised.\n \"\"\"\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_database(self, name, character_set=None, collate=None):\n if character_set is None:\n character_set = \"utf8\"\n if collate is None:\n collate = \"utf8_general_ci\"\n self._database_manager.create(name=name, character_set=character_set,\n collate=collate, return_none=True)\n # Since the API doesn't return the info for creating the database\n # object, we have to do it manually.\n return self._database_manager.find(name=name)", "response": "Creates a new database with the specified name."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_user(self, name, password, database_names, host=None):\n if not isinstance(database_names, (list, tuple)):\n database_names = [database_names]\n # The API only accepts names, not DB objects\n database_names = [db if isinstance(db, six.string_types) else db.name\n for db in database_names]\n self._user_manager.create(name=name, password=password,\n database_names=database_names, host=host, return_none=True)\n # Since the API doesn't return the info for creating the user object,\n # we 
have to do it manually.\n return self._user_manager.find(name=name)", "response": "Creates a user with the specified name and password and gives that user access to the specified databases."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef delete_database(self, name_or_obj):\n name = utils.get_name(name_or_obj)\n self._database_manager.delete(name)", "response": "Deletes the specified database."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nupdates the user s credentials.", "response": "def update_user(self, user, name=None, password=None, host=None):\n \"\"\"\n Allows you to change one or more of the user's username, password, or\n host.\n \"\"\"\n return self._user_manager.update(user, name=name, password=password,\n host=host)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngranting access to the databases listed in db_names to the user.", "response": "def grant_user_access(self, user, db_names, strict=True):\n \"\"\"\n Gives access to the databases listed in `db_names` to the user.\n \"\"\"\n return self._user_manager.grant_user_access(user, db_names,\n strict=strict)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nrevokes access to the databases listed in db_names for the user.", "response": "def revoke_user_access(self, user, db_names, strict=True):\n \"\"\"\n Revokes access to the databases listed in `db_names` for the user.\n \"\"\"\n return self._user_manager.revoke_user_access(user, db_names,\n strict=strict)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "response": "Deletes the specified user from the user manager."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the 
following Python 3 code\ndef enable_root_user(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_post(uri)\n return body[\"user\"][\"password\"]", "response": "Enables login from any host for the root user and provides\n the user with a generated root password."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef root_user_status(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_get(uri)\n return body[\"rootEnabled\"]", "response": "Returns True or False depending on whether the root user is enabled for this instance."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef resize(self, flavor):\n # We need the flavorRef, not the flavor or size.\n flavorRef = self.manager.api._get_flavor_ref(flavor)\n body = {\"flavorRef\": flavorRef}\n self.manager.action(self, \"resize\", body=body)", "response": "Set the size of this instance to a different flavor."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nchanging the size of the volume for this instance.", "response": "def resize_volume(self, size):\n \"\"\"Changes the size of the volume for this instance.\"\"\"\n curr_size = self.volume.size\n if size <= curr_size:\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\n \"than the current volume size of '%s'.\" % curr_size)\n body = {\"volume\": {\"size\": size}}\n self.manager.action(self, \"resize\", body=body)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list_backups(self, limit=20, marker=0):\n return self.manager._list_backups_for_instance(self, limit=limit,\n marker=marker)", "response": "Returns a paginated list of backups for this instance."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a backup of this instance giving 
it the specified name along with an optional description.", "response": "def create_backup(self, name, description=None):\n \"\"\"\n Creates a backup of this instance, giving it the specified name along\n with an optional description.\n \"\"\"\n return self.manager.create_backup(self, name, description=description)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update(self, name=None, password=None, host=None):\n return self.manager.update(self, name=name, password=password,\n host=host)", "response": "Update the user s identity cache."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef grant_user_access(self, db_names, strict=True):\n return self.manager.grant_user_access(self, db_names, strict=strict)", "response": "Grant access to the databases listed in db_names to the user."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef revoke_user_access(self, db_names, strict=True):\n return self.manager.revoke_user_access(self, db_names, strict=strict)", "response": "Revokes access to the databases listed in db_names for the user."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _configure_manager(self):\n self._manager = CloudDatabaseManager(self,\n resource_class=CloudDatabaseInstance, response_key=\"instance\",\n uri_base=\"instances\")\n self._flavor_manager = BaseManager(self,\n resource_class=CloudDatabaseFlavor, response_key=\"flavor\",\n uri_base=\"flavors\")\n self._backup_manager = CloudDatabaseBackupManager(self,\n resource_class=CloudDatabaseBackup, response_key=\"backup\",\n uri_base=\"backups\")", "response": "Creates a manager to handle the instances and the flavors."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_databases(self, instance, limit=None, 
marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "response": "Returns all databases for the specified instance."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncreating a database with the specified name on the given instance.", "response": "def create_database(self, instance, name, character_set=None,\n collate=None):\n \"\"\"Creates a database with the specified name on the given instance.\"\"\"\n return instance.create_database(name, character_set=character_set,\n collate=collate)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns all users for the specified instance.", "response": "def list_users(self, instance, limit=None, marker=None):\n \"\"\"Returns all users for the specified instance.\"\"\"\n return instance.list_users(limit=limit, marker=marker)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a user with the specified name and password and gives that user access to the specified databases.", "response": "def create_user(self, instance, name, password, database_names, host=None):\n \"\"\"\n Creates a user with the specified name and password, and gives that\n user access to the specified database(s).\n \"\"\"\n return instance.create_user(name=name, password=password,\n database_names=database_names, host=host)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the user with the given name password and host.", "response": "def update_user(self, instance, user, name=None, password=None, host=None):\n \"\"\"\n Allows you to change one or more of the user's username, password, or\n host.\n \"\"\"\n return instance.update_user(user, name=name, password=password,\n host=host)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef grant_user_access(self, instance, user, db_names, strict=True):\n return instance.grant_user_access(user, 
db_names, strict=strict)", "response": "Grant access to the databases listed in db_names to the user on the specified instance."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nrevoke access to the databases listed in db_names for the user on the specified instance.", "response": "def revoke_user_access(self, instance, user, db_names, strict=True):\n \"\"\"\n Revokes access to the databases listed in `db_names` for the user\n on the specified instance.\n \"\"\"\n return instance.revoke_user_access(user, db_names, strict=strict)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef list_flavors(self, limit=None, marker=None):\n return self._flavor_manager.list(limit=limit, marker=marker)", "response": "Returns a list of all available Flavors."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _get_flavor_ref(self, flavor):\n flavor_obj = None\n if isinstance(flavor, CloudDatabaseFlavor):\n flavor_obj = flavor\n elif isinstance(flavor, int):\n # They passed an ID or a size\n try:\n flavor_obj = self.get_flavor(flavor)\n except exc.NotFound:\n # Must be either a size or bad ID, which will\n # be handled below\n pass\n if flavor_obj is None:\n # Try flavor name\n flavors = self.list_flavors()\n try:\n flavor_obj = [flav for flav in flavors\n if flav.name == flavor][0]\n except IndexError:\n # No such name; try matching RAM\n try:\n flavor_obj = [flav for flav in flavors\n if flav.ram == flavor][0]\n except IndexError:\n raise exc.FlavorNotFound(\"Could not determine flavor from \"\n \"'%s'.\" % flavor)\n # OK, we have a Flavor object. 
Get the href\n href = [link[\"href\"] for link in flavor_obj.links\n if link[\"rel\"] == \"self\"][0]\n return href", "response": "Get the href link for a given flavor."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_backup(self, instance, name, description=None):\n return instance.create_backup(name, description=description)", "response": "Creates a backup of the specified instance giving it the specified\n name along with an optional description."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef restore_backup(self, backup, name, flavor, volume):\n return self._manager.restore_backup(backup, name, flavor, volume)", "response": "Restores a backup to a new database instance."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn the MD5 checksum of the given content.", "response": "def get_checksum(content, encoding=\"utf8\", block_size=8192):\n \"\"\"\n Returns the MD5 checksum in hex for the given content. If 'content'\n is a file-like object, the content will be obtained from its read()\n method. If 'content' is a file path, that file is read and its\n contents used. Otherwise, 'content' is assumed to be the string whose\n checksum is desired. If the content is unicode, it will be encoded\n using the specified encoding.\n\n To conserve memory, files and file-like objects will be read in blocks,\n with the default block size of 8192 bytes, which is 64 * the digest block\n size of md5 (128). 
This is optimal for most cases, but you can change this\n by passing in a different value for `block_size`.\n \"\"\"\n md = hashlib.md5()\n\n def safe_update(txt):\n if isinstance(txt, six.text_type):\n txt = txt.encode(encoding)\n md.update(txt)\n\n try:\n isfile = os.path.isfile(content)\n except (TypeError, ValueError):\n # Will happen with binary content.\n isfile = False\n if isfile:\n with open(content, \"rb\") as ff:\n txt = ff.read(block_size)\n while txt:\n safe_update(txt)\n txt = ff.read(block_size)\n elif hasattr(content, \"read\"):\n pos = content.tell()\n content.seek(0)\n txt = content.read(block_size)\n while txt:\n safe_update(txt)\n txt = content.read(block_size)\n content.seek(pos)\n else:\n safe_update(content)\n return md.hexdigest()"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _join_chars(chars, length):\n mult = int(length / len(chars)) + 1\n mult_chars = chars * mult\n return \"\".join(random.sample(mult_chars, length))", "response": "Join a list of random characters into a single string."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef random_unicode(length=20):\n def get_char():\n return six.unichr(random.randint(32, 1000))\n chars = u\"\".join([get_char() for ii in six.moves.range(length)])\n return _join_chars(chars, length)", "response": "Generates a random unicode string of the specified length."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef coerce_to_list(val):\n if val:\n if not isinstance(val, (list, tuple)):\n val = [val]\n else:\n val = []\n return val", "response": "Coerce the passed value to a list of strings."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the total bytes for the specified path optionally ignoring any files which match the ignore parameter.", "response": "def folder_size(pth, 
ignore=None):\n \"\"\"\n Returns the total bytes for the specified path, optionally ignoring\n any files which match the 'ignore' parameter. 'ignore' can either be\n a single string pattern, or a list of such patterns.\n \"\"\"\n if not os.path.isdir(pth):\n raise exc.FolderNotFound\n\n ignore = coerce_to_list(ignore)\n\n total = 0\n for root, _, names in os.walk(pth):\n paths = [os.path.realpath(os.path.join(root, nm)) for nm in names]\n for pth in paths[::-1]:\n if not os.path.exists(pth):\n paths.remove(pth)\n elif match_pattern(pth, ignore):\n paths.remove(pth)\n total += sum(os.stat(pth).st_size for pth in paths)\n\n return total"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef add_method(obj, func, name=None):\n if name is None:\n name = func.__name__\n if sys.version_info < (3,):\n method = types.MethodType(func, obj, obj.__class__)\n else:\n method = types.MethodType(func, obj)\n setattr(obj, name, method)", "response": "Adds an instance method to an object."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef wait_until(obj, att, desired, callback=None, interval=5, attempts=0,\n verbose=False, verbose_atts=None):\n \"\"\"\n When changing the state of an object, it will commonly be in a transitional\n state until the change is complete. This will reload the object every\n `interval` seconds, and check its `att` attribute until the `desired` value\n is reached, or until the maximum number of attempts is reached. The updated\n object is returned. It is up to the calling program to check the returned\n object to make sure that it successfully reached the desired state.\n\n Once the desired value of the attribute is reached, the method returns. If\n not, it will re-try until the attribute's value matches one of the\n `desired` values. By default (attempts=0) it will loop infinitely until the\n attribute reaches the desired value. 
You can optionally limit the number of\n times that the object is reloaded by passing a positive value to\n `attempts`. If the attribute has not reached the desired value by then, the\n method will exit.\n\n If `verbose` is True, each attempt will print out the current value of the\n watched attribute and the time that has elapsed since the original request.\n Also, if `verbose_atts` is specified, the values of those attributes will\n also be output. If `verbose` is False, then `verbose_atts` has no effect.\n\n Note that `desired` can be a list of values; if the attribute becomes equal\n to any of those values, this will succeed. For example, when creating a new\n cloud server, it will initially have a status of 'BUILD', and you can't\n work with it until its status is 'ACTIVE'. However, there might be a\n problem with the build process, and the server will change to a status of\n 'ERROR'. So for this case you need to set the `desired` parameter to\n `['ACTIVE', 'ERROR']`. If you simply pass 'ACTIVE' as the desired state,\n this will loop indefinitely if a build fails, as the server will never\n reach a status of 'ACTIVE'.\n\n Since this process of waiting can take a potentially long time, and will\n block your program's execution until the desired state of the object is\n reached, you may specify a callback function. The callback can be any\n callable that accepts a single parameter; the parameter it receives will be\n either the updated object (success), or None (failure). 
If a callback is\n specified, the program will return immediately after spawning the wait\n process in a separate thread.\n \"\"\"\n if callback:\n waiter = _WaitThread(obj=obj, att=att, desired=desired, callback=callback,\n interval=interval, attempts=attempts, verbose=verbose,\n verbose_atts=verbose_atts)\n waiter.start()\n return waiter\n else:\n return _wait_until(obj=obj, att=att, desired=desired, callback=None,\n interval=interval, attempts=attempts, verbose=verbose,\n verbose_atts=verbose_atts)", "response": "Waits until the state of an object is in the desired state."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nwaiting until the desired value of the attribute is reached.", "response": "def _wait_until(obj, att, desired, callback, interval, attempts, verbose,\n verbose_atts):\n \"\"\"\n Loops until either the desired value of the attribute is reached, or the\n number of attempts is exceeded.\n \"\"\"\n if not isinstance(desired, (list, tuple)):\n desired = [desired]\n if verbose_atts is None:\n verbose_atts = []\n if not isinstance(verbose_atts, (list, tuple)):\n verbose_atts = [verbose_atts]\n infinite = (attempts == 0)\n attempt = 0\n start = time.time()\n while infinite or (attempt < attempts):\n try:\n # For servers:\n obj.get()\n except AttributeError:\n try:\n # For other objects that don't support .get()\n obj = obj.manager.get(obj.id)\n except AttributeError:\n # punt\n raise exc.NoReloadError(\"The 'wait_until' method is not \"\n \"supported for '%s' objects.\" % obj.__class__)\n attval = getattr(obj, att)\n if verbose:\n elapsed = time.time() - start\n msgs = [\"Current value of %s: %s (elapsed: %4.1f seconds)\" % (\n att, attval, elapsed)]\n for vatt in verbose_atts:\n vattval = getattr(obj, vatt, None)\n msgs.append(\"%s=%s\" % (vatt, vattval))\n print(\" \".join(msgs))\n if attval in desired:\n return obj\n time.sleep(interval)\n attempt += 1\n return obj"} {"SOURCE": "codesearchnet", "instruction": "Given the 
following Python 3 function, write the documentation\ndef wait_for_build(obj, att=None, desired=None, callback=None, interval=None,\n attempts=None, verbose=None, verbose_atts=None):\n \"\"\"\n Designed to handle the most common use case for wait_until: an object whose\n 'status' attribute will end up in either 'ACTIVE' or 'ERROR' state. Since\n builds don't happen very quickly, the interval will default to 20 seconds\n to avoid excess polling.\n \"\"\"\n att = att or \"status\"\n desired = desired or [\"ACTIVE\", \"ERROR\", \"available\", \"COMPLETED\"]\n interval = interval or 20\n attempts = attempts or 0\n verbose_atts = verbose_atts or \"progress\"\n return wait_until(obj, att, desired, callback=callback, interval=interval,\n attempts=attempts, verbose=verbose, verbose_atts=verbose_atts)", "response": "This function is used to wait until a build is complete."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _parse_datetime_string(val):\n dt = None\n lenval = len(val)\n fmt = {19: \"%Y-%m-%d %H:%M:%S\", 10: \"%Y-%m-%d\"}.get(lenval)\n if fmt is None:\n # Invalid date\n raise exc.InvalidDateTimeString(\"The supplied value '%s' does not \"\n \"match either of the formats 'YYYY-MM-DD HH:MM:SS' or \"\n \"'YYYY-MM-DD'.\" % val)\n return datetime.datetime.strptime(val, fmt)", "response": "Attempts to parse a string representation of a date or datetime value and returns a datetime. 
datetime object."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef iso_time_string(val, show_tzinfo=False):\n if not val:\n return \"\"\n if isinstance(val, six.string_types):\n dt = _parse_datetime_string(val)\n else:\n dt = val\n if not isinstance(dt, datetime.datetime):\n dt = datetime.datetime.fromordinal(dt.toordinal())\n has_tz = (dt.tzinfo is not None)\n if show_tzinfo and has_tz:\n # Need to remove the colon in the TZ portion\n ret = \"\".join(dt.isoformat().rsplit(\":\", 1))\n elif show_tzinfo and not has_tz:\n ret = \"%s+0000\" % dt.isoformat().split(\".\")[0]\n elif not show_tzinfo and has_tz:\n ret = dt.isoformat()[:-6]\n elif not show_tzinfo and not has_tz:\n ret = dt.isoformat().split(\".\")[0]\n return ret", "response": "Takes either a date datetime or a string and returns the ISO\n formatted string for that date or time."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef rfc2822_format(val):\n if isinstance(val, six.string_types):\n return val\n elif isinstance(val, (datetime.datetime, datetime.date)):\n # Convert to a timestamp\n val = time.mktime(val.timetuple())\n if isinstance(val, numbers.Number):\n return email.utils.formatdate(val)\n else:\n # Bail\n return val", "response": "Takes either a date a datetime or a string and returns a string that represents the value in RFC 2822 format."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef to_timestamp(val):\n # If we're given a number, give it right back - it's already a timestamp.\n if isinstance(val, numbers.Number):\n return val\n elif isinstance(val, six.string_types):\n dt = _parse_datetime_string(val)\n else:\n dt = val\n return time.mktime(dt.timetuple())", "response": "Converts a value that is either a Python date datetime or a string\n representation of a date or datetime value. 
Returns a standard Unix timestamp\n corresponding to that value."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the id attribute of id_or_obj.", "response": "def get_id(id_or_obj):\n \"\"\"\n Returns the 'id' attribute of 'id_or_obj' if present; if not,\n returns 'id_or_obj'.\n \"\"\"\n if isinstance(id_or_obj, six.string_types + (int,)):\n # It's an ID\n return id_or_obj\n try:\n return id_or_obj.id\n except AttributeError:\n return id_or_obj"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_name(name_or_obj):\n if isinstance(name_or_obj, six.string_types):\n # It's a name\n return name_or_obj\n try:\n return name_or_obj.name\n except AttributeError:\n raise exc.MissingName(name_or_obj)", "response": "Returns the name attribute of name_or_obj."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef params_to_dict(params, dct):\n for param, val in params.items():\n if val is None:\n continue\n dct[param] = val\n return dct", "response": "Updates the dct dictionary with the params dictionary."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ntaking a dictionary and uses it to create a query string.", "response": "def dict_to_qs(dct):\n \"\"\"\n Takes a dictionary and uses it to create a query string.\n \"\"\"\n itms = [\"%s=%s\" % (key, val) for key, val in list(dct.items())\n if val is not None]\n return \"&\".join(itms)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncomparing nm with the supplied patterns and returns True if it matches at least one.", "response": "def match_pattern(nm, patterns):\n \"\"\"\n Compares `nm` with the supplied patterns, and returns True if it matches\n at least one.\n\n Patterns are standard file-name wildcard strings, as defined in the\n `fnmatch` module. 
For example, the pattern \"*.py\" will match the names\n of all Python scripts.\n \"\"\"\n patterns = coerce_to_list(patterns)\n for pat in patterns:\n if fnmatch.fnmatch(nm, pat):\n return True\n return False"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update_exc(exc, msg, before=True, separator=\"\\n\"):\n emsg = exc.message\n if before:\n parts = (msg, separator, emsg)\n else:\n parts = (emsg, separator, msg)\n new_msg = \"%s%s%s\" % parts\n new_args = (new_msg, ) + exc.args[1:]\n exc.message = new_msg\n exc.args = new_args\n return exc", "response": "Update an exception s error message with the given message."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef case_insensitive_update(dct1, dct2):\n lowkeys = dict([(key.lower(), key) for key in dct1])\n for key, val in dct2.items():\n d1_key = lowkeys.get(key.lower(), key)\n dct1[d1_key] = val", "response": "Updates the first dict with the second dict."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef env(*args, **kwargs):\n for arg in args:\n value = os.environ.get(arg, None)\n if value:\n return value\n return kwargs.get(\"default\", \"\")", "response": "Returns the first environment variable set\n if none are non - empty defaults to \"\""} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition(\".\")\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)", "response": "Returns a class from a string including module and class."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ndecoding incoming text using incoming if they re not already unicode.", "response": "def safe_decode(text, incoming=None, errors='strict'):\n \"\"\"Decodes incoming text/bytes string using 
`incoming` if they're not\n already unicode.\n\n This function was copied from novaclient.openstack.strutils\n\n :param incoming: Text's current encoding\n :param errors: Errors handling policy. See here for valid\n values http://docs.python.org/2/library/codecs.html\n :returns: text or a unicode `incoming` encoded\n representation of it.\n :raises TypeError: If text is not an instance of str\n \"\"\"\n if not isinstance(text, (six.string_types, six.binary_type)):\n raise TypeError(\"%s can't be decoded\" % type(text))\n\n if isinstance(text, six.text_type):\n return text\n\n if not incoming:\n incoming = (sys.stdin.encoding or\n sys.getdefaultencoding())\n\n try:\n return text.decode(incoming, errors)\n except UnicodeDecodeError:\n # Note(flaper87) If we get here, it means that\n # sys.stdin.encoding / sys.getdefaultencoding\n # didn't return a suitable encoding to decode\n # text. This happens mostly when global LANG\n # var is not set correctly and there's no\n # default encoding. In this case, most likely\n # python will use ASCII or ANSI encoders as\n # default encodings but they won't be capable\n # of decoding non-ASCII characters.\n #\n # Also, UTF-8 is being used since it's an ASCII\n # extension.\n return text.decode('utf-8', errors)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nnormalize string. Convert to lowercase, remove non-word characters, and convert spaces to hyphens. This function was copied from novaclient.openstack.strutils Inspired by Django's `slugify` filter. :param value: Text to slugify :param incoming: Text's current encoding :param errors: Errors handling policy. 
See here for valid values http://docs.python.org/2/library/codecs.html :returns: slugified unicode representation of `value` :raises TypeError: If text is not an instance of str", "response": "def to_slug(value, incoming=None, errors=\"strict\"):\n \"\"\"Normalize string.\n\n Convert to lowercase, remove non-word characters, and convert spaces\n to hyphens.\n\n This function was copied from novaclient.openstack.strutils\n\n Inspired by Django's `slugify` filter.\n\n :param value: Text to slugify\n :param incoming: Text's current encoding\n :param errors: Errors handling policy. See here for valid\n values http://docs.python.org/2/library/codecs.html\n :returns: slugified unicode representation of `value`\n :raises TypeError: If text is not an instance of str\n \"\"\"\n value = safe_decode(value, incoming, errors)\n # NOTE(aababilov): no need to use safe_(encode|decode) here:\n # encodings are always \"ascii\", error handling is always \"ignore\"\n # and types are always known (first: unicode; second: str)\n value = unicodedata.normalize(\"NFKD\", value).encode(\n \"ascii\", \"ignore\").decode(\"ascii\")\n value = SLUGIFY_STRIP_RE.sub(\"\", value).strip().lower()\n return SLUGIFY_HYPHENATE_RE.sub(\"-\", value)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns the next available item from the API.", "response": "def next(self):\n \"\"\"\n Return the next available item. If there are no more items in the\n local 'results' list, check if there is a 'next_uri' value. 
If so,\n use that to get the next page of results from the API, and return\n the first item from that query.\n \"\"\"\n try:\n return self.results.pop(0)\n except IndexError:\n if self.next_uri is None:\n raise StopIteration()\n else:\n if not self.next_uri:\n self.results = self.list_method(marker=self.marker,\n limit=self.limit, prefix=self.prefix)\n else:\n args = self.extra_args\n self.results = self._list_method(self.next_uri, *args)\n if self.results:\n last_res = self.results[-1]\n self.marker = getattr(last_res, self.marker_att)\n # We should have more results.\n try:\n return self.results.pop(0)\n except IndexError:\n raise StopIteration()"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nconverts a volumeID passed as the volume to a CloudBlockStorageVolume object.", "response": "def assure_volume(fnc):\n \"\"\"\n Converts a volumeID passed as the volume to a CloudBlockStorageVolume object.\n \"\"\"\n @wraps(fnc)\n def _wrapped(self, volume, *args, **kwargs):\n if not isinstance(volume, CloudBlockStorageVolume):\n # Must be the ID\n volume = self._manager.get(volume)\n return fnc(self, volume, *args, **kwargs)\n return _wrapped"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef assure_snapshot(fnc):\n @wraps(fnc)\n def _wrapped(self, snapshot, *args, **kwargs):\n if not isinstance(snapshot, CloudBlockStorageSnapshot):\n # Must be the ID\n snapshot = self._snapshot_manager.get(snapshot)\n return fnc(self, snapshot, *args, **kwargs)\n return _wrapped", "response": "A decorator that ensures that a snapshot ID passed as the snapshot is passed as the snapshot."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndeleting the current snapshot.", "response": "def delete(self):\n \"\"\"\n Adds a check to make sure that the snapshot is able to be deleted.\n \"\"\"\n if self.status not in (\"available\", \"error\"):\n raise 
exc.SnapshotNotAvailable(\"Snapshot must be in 'available' \"\n \"or 'error' status before deleting. Current status: %s\" %\n self.status)\n # When there are more thann one snapshot for a given volume, attempting to\n # delete them all will throw a 409 exception. This will help by retrying\n # such an error once after a RETRY_INTERVAL second delay.\n try:\n super(CloudBlockStorageSnapshot, self).delete()\n except exc.ClientException as e:\n if \"Request conflicts with in-progress 'DELETE\" in str(e):\n time.sleep(RETRY_INTERVAL)\n # Try again; if it fails, oh, well...\n super(CloudBlockStorageSnapshot, self).delete()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nupdating the specified values on this snapshot.", "response": "def update(self, display_name=None, display_description=None):\n \"\"\"\n Update the specified values on this snapshot. You may specify one or\n more values to update. If no values are specified as non-None, the call\n is a no-op; no exception will be raised.\n \"\"\"\n return self.manager.update(self, display_name=display_name,\n display_description=display_description)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nattaches this volume to the cloud server instance at the specified mountpoint.", "response": "def attach_to_instance(self, instance, mountpoint):\n \"\"\"\n Attaches this volume to the cloud server instance at the\n specified mountpoint. 
This requires a call to the cloud servers\n API; it cannot be done directly.\n \"\"\"\n instance_id = _resolve_id(instance)\n try:\n resp = self._nova_volumes.create_server_volume(instance_id,\n self.id, mountpoint)\n except Exception as e:\n raise exc.VolumeAttachmentFailed(\"%s\" % e)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndetaching this volume from any device it may be attached to.", "response": "def detach(self):\n \"\"\"\n Detaches this volume from any device it may be attached to. If it\n is not attached, nothing happens.\n \"\"\"\n attachments = self.attachments\n if not attachments:\n # Not attached; no error needed, just return\n return\n # A volume can only be attached to one device at a time, but for some\n # reason this is a list instead of a singular value\n att = attachments[0]\n instance_id = att[\"server_id\"]\n attachment_id = att[\"id\"]\n try:\n self._nova_volumes.delete_server_volume(instance_id, attachment_id)\n except Exception as e:\n raise exc.VolumeDetachmentFailed(\"%s\" % e)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete(self, force=False):\n if force:\n self.detach()\n self.delete_all_snapshots()\n try:\n super(CloudBlockStorageVolume, self).delete()\n except exc.VolumeNotAvailable:\n # Notify the user? 
Record it somewhere?\n # For now, just re-raise\n raise", "response": "Delete the Cloud Block Storage Volume."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create_snapshot(self, name=None, description=None, force=False):\n name = name or \"\"\n description = description or \"\"\n # Note that passing in non-None values is required for the _create_body\n # method to distinguish between this and the request to create and\n # instance.\n return self.manager.create_snapshot(volume=self, name=name,\n description=description, force=force)", "response": "Creates a snapshot of this volume with an optional name and description."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_snapshots(self):\n return [snap for snap in self.manager.list_snapshots()\n if snap.volume_id == self.id]", "response": "Returns a list of all snapshots of this volume."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _create_body(self, name, size=None, volume_type=None, description=None,\n metadata=None, snapshot_id=None, clone_id=None,\n availability_zone=None, image=None):\n \"\"\"\n Used to create the dict required to create a new volume\n \"\"\"\n if not isinstance(size, six.integer_types):\n raise exc.InvalidSize(\"Volume sizes must be integers\")\n\n if volume_type is None:\n volume_type = \"SATA\"\n if description is None:\n description = \"\"\n if metadata is None:\n metadata = {}\n if image is not None:\n image = utils.get_id(image)\n body = {\"volume\": {\n \"size\": size,\n \"snapshot_id\": snapshot_id,\n \"source_volid\": clone_id,\n \"display_name\": name,\n \"display_description\": description,\n \"volume_type\": volume_type,\n \"metadata\": metadata,\n \"availability_zone\": availability_zone,\n \"imageRef\": image,\n }}\n return body", "response": "Create the dict required to create a new 
volume"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create(self, *args, **kwargs):\n try:\n return super(CloudBlockStorageManager, self).create(*args,\n **kwargs)\n except exc.BadRequest as e:\n msg = e.message\n if \"Clones currently must be >= original volume size\" in msg:\n raise exc.VolumeCloneTooSmall(msg)\n else:\n raise", "response": "Creates a new object of the specified class."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update(self, volume, display_name=None, display_description=None):\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(volume))\n param_dict = {}\n if display_name:\n param_dict[\"display_name\"] = display_name\n if display_description:\n param_dict[\"display_description\"] = display_description\n if not param_dict:\n # Nothing to do!\n return\n body = {\"volume\": param_dict}\n resp, resp_body = self.api.method_put(uri, body=body)", "response": "Updates the display name and display description of the specified volume."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncreate a snapshot of a specific node.", "response": "def create_snapshot(self, volume, name, description=None, force=False):\n \"\"\"\n Pass-through method to allow the create_snapshot() call to be made\n directly on a volume.\n \"\"\"\n return self.api.create_snapshot(volume, name, description=description,\n force=force)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _create_body(self, name, description=None, volume=None, force=False):\n body = {\"snapshot\": {\n \"display_name\": name,\n \"display_description\": description,\n \"volume_id\": volume.id,\n \"force\": str(force).lower(),\n }}\n return body", "response": "Create the dict required to create a new snapshot"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where 
it\ncreates a new snapshot.", "response": "def create(self, name, volume, description=None, force=False):\n \"\"\"\n Adds exception handling to the default create() call.\n \"\"\"\n try:\n snap = super(CloudBlockStorageSnapshotManager, self).create(\n name=name, volume=volume, description=description,\n force=force)\n except exc.BadRequest as e:\n msg = str(e)\n if \"Invalid volume: must be available\" in msg:\n # The volume for the snapshot was attached.\n raise exc.VolumeNotAvailable(\"Cannot create a snapshot from an \"\n \"attached volume. Detach the volume before trying \"\n \"again, or pass 'force=True' to the create_snapshot() \"\n \"call.\")\n else:\n # Some other error\n raise\n except exc.ClientException as e:\n if e.code == 409:\n if \"Request conflicts with in-progress\" in str(e):\n txt = (\"The volume is current creating a snapshot. You \"\n \"must wait until that completes before attempting \"\n \"to create an additional snapshot.\")\n raise exc.VolumeNotAvailable(txt)\n else:\n raise\n else:\n raise\n return snap"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate the manager to handle the instances and the flavors.", "response": "def _configure_manager(self):\n \"\"\"\n Create the manager to handle the instances, and also another\n to handle flavors.\n \"\"\"\n self._manager = CloudBlockStorageManager(self,\n resource_class=CloudBlockStorageVolume, response_key=\"volume\",\n uri_base=\"volumes\")\n self._types_manager = BaseManager(self,\n resource_class=CloudBlockStorageVolumeType,\n response_key=\"volume_type\", uri_base=\"types\")\n self._snapshot_manager = CloudBlockStorageSnapshotManager(self,\n resource_class=CloudBlockStorageSnapshot,\n response_key=\"snapshot\", uri_base=\"snapshots\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update(self, volume, display_name=None, display_description=None):\n return 
volume.update(display_name=display_name,\n display_description=display_description)", "response": "Update the specified values on the specified volume."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a snapshot of the specified volume.", "response": "def create_snapshot(self, volume, name=None, description=None, force=False):\n \"\"\"\n Creates a snapshot of the volume, with an optional name and description.\n\n Normally snapshots will not happen if the volume is attached. To\n override this default behavior, pass force=True.\n \"\"\"\n return self._snapshot_manager.create(volume=volume, name=name,\n description=description, force=force)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nupdating the specified values on the specified snapshot.", "response": "def update_snapshot(self, snapshot, display_name=None,\n display_description=None):\n \"\"\"\n Update the specified values on the specified snapshot. You may specify\n one or more values to update.\n \"\"\"\n return snapshot.update(display_name=display_name,\n display_description=display_description)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nconverts an checkID passed as the check to a CloudMonitorCheck object.", "response": "def assure_check(fnc):\n \"\"\"\n Converts an checkID passed as the check to a CloudMonitorCheck object.\n \"\"\"\n @wraps(fnc)\n def _wrapped(self, check, *args, **kwargs):\n if not isinstance(check, CloudMonitorCheck):\n # Must be the ID\n check = self._check_manager.get(check)\n return fnc(self, check, *args, **kwargs)\n return _wrapped"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef assure_entity(fnc):\n @wraps(fnc)\n def _wrapped(self, entity, *args, **kwargs):\n if not isinstance(entity, CloudMonitorEntity):\n # Must be the ID\n entity = self._entity_manager.get(entity)\n return 
fnc(self, entity, *args, **kwargs)\n return _wrapped", "response": "A decorator that ensures that an entityID passed as the entity is passed as the entity."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef update(self, agent=None, metadata=None):\n self.manager.update_entity(self, agent=agent, metadata=metadata)", "response": "Update the agent_id and metadata of this resource."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_check(self, check):\n chk = self._check_manager.get(check)\n chk.set_entity(self)\n return chk", "response": "Returns an instance of the specified check."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef list_checks(self, limit=None, marker=None, return_next=False):\n checks = self._check_manager.list(limit=limit, marker=marker,\n return_next=return_next)\n for check in checks:\n check.set_entity(self)\n return checks", "response": "Returns a list of the checks defined for this account."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef find_all_checks(self, **kwargs):\n checks = self._check_manager.find_all_checks(**kwargs)\n for check in checks:\n check.set_entity(self)\n return checks", "response": "Find all checks for this entity with attributes matching kwargs."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create_check(self, label=None, name=None, check_type=None,\n disabled=False, metadata=None, details=None,\n monitoring_zones_poll=None, timeout=None, period=None,\n target_alias=None, target_hostname=None, target_receiver=None,\n test_only=False, include_debug=False):\n \"\"\"\n Creates a check on this entity with the specified attributes. 
The\n 'details' parameter should be a dict with the keys as the option name,\n and the value as the desired setting.\n \"\"\"\n return self._check_manager.create_check(label=label, name=name,\n check_type=check_type, disabled=disabled, metadata=metadata,\n details=details, monitoring_zones_poll=monitoring_zones_poll,\n timeout=timeout, period=period, target_alias=target_alias,\n target_hostname=target_hostname,\n target_receiver=target_receiver, test_only=test_only,\n include_debug=include_debug)", "response": "Creates a check on this entity with the specified attributes."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncreate an alarm that binds the check on this entity with the notification plan.", "response": "def create_alarm(self, check, notification_plan, criteria=None,\n disabled=False, label=None, name=None, metadata=None):\n \"\"\"\n Creates an alarm that binds the check on this entity with a\n notification plan.\n \"\"\"\n return self._alarm_manager.create(check, notification_plan,\n criteria=criteria, disabled=disabled, label=label, name=name,\n metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update_alarm(self, alarm, criteria=None, disabled=False,\n label=None, name=None, metadata=None):\n \"\"\"\n Updates an existing alarm on this entity.\n \"\"\"\n return self._alarm_manager.update(alarm, criteria=criteria,\n disabled=disabled, label=label, name=name, metadata=metadata)", "response": "Updates an existing alarm on this entity."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning a list of all the alarms created on this entity.", "response": "def list_alarms(self, limit=None, marker=None, return_next=False):\n \"\"\"\n Returns a list of all the alarms created on this entity.\n \"\"\"\n return self._alarm_manager.list(limit=limit, marker=marker,\n return_next=return_next)"} {"SOURCE": "codesearchnet", 
"instruction": "How would you implement a function in Python 3 that\nlists the entries in the cache.", "response": "def list(self, limit=None, marker=None, return_next=False):\n \"\"\"\n This is necessary to handle pagination correctly, as the Monitoring\n service defines 'marker' differently than most other services. For\n monitoring, 'marker' represents the first item in the next page,\n whereas other services define it as the ID of the last item in the\n current page.\n \"\"\"\n kwargs = {}\n if return_next:\n kwargs[\"other_keys\"] = \"metadata\"\n ret = super(_PaginationManager, self).list(limit=limit,\n marker=marker, **kwargs)\n if return_next:\n ents, meta = ret\n return (ents, meta[0].get(\"next_marker\"))\n else:\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating a new alarm.", "response": "def create(self, notification_type, label=None, name=None, details=None):\n \"\"\"\n Defines a notification for handling an alarm.\n \"\"\"\n uri = \"/%s\" % self.uri_base\n body = {\"label\": label or name,\n \"type\": utils.get_id(notification_type),\n \"details\": details,\n }\n resp, resp_body = self.api.method_post(uri, body=body)\n return self.get(resp.headers[\"x-object-id\"])"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nupdates the details of a specific CloudMonitorNotification.", "response": "def update_notification(self, notification, details):\n \"\"\"\n Updates the specified notification with the supplied details.\n \"\"\"\n if isinstance(notification, CloudMonitorNotification):\n nid = notification.id\n ntyp = notification.type\n else:\n # Supplied an ID\n nfcn = self.get(notification)\n nid = notification\n ntyp = nfcn.type\n uri = \"/%s/%s\" % (self.uri_base, nid)\n body = {\"type\": ntyp,\n \"details\": details}\n resp, resp_body = self.api.method_put(uri, body=body)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn a list 
of all available notification types.", "response": "def list_types(self):\n \"\"\"\n Returns a list of all available notification types.\n \"\"\"\n uri = \"/notification_types\"\n resp, resp_body = self.api.method_get(uri)\n return [CloudMonitorNotificationType(self, info)\n for info in resp_body[\"values\"]]"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a CloudMonitorNotificationType object for the given ID.", "response": "def get_type(self, notification_type_id):\n \"\"\"\n Returns a CloudMonitorNotificationType object for the given ID.\n \"\"\"\n uri = \"/notification_types/%s\" % utils.get_id(notification_type_id)\n resp, resp_body = self.api.method_get(uri)\n return CloudMonitorNotificationType(self, resp_body)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate a new check taxonomy.", "response": "def create(self, label=None, name=None, critical_state=None, ok_state=None,\n warning_state=None):\n \"\"\"\n Creates a notification plan to be executed when a monitoring check\n triggers an alarm. You can optionally label (or name) the plan.\n\n A plan consists of one or more notifications to be executed when an\n associated alarm is triggered. 
You can have different lists of actions\n for CRITICAL, WARNING or OK states.\n \"\"\"\n uri = \"/%s\" % self.uri_base\n body = {\"label\": label or name}\n\n def make_list_of_ids(parameter):\n params = utils.coerce_to_list(parameter)\n return [utils.get_id(param) for param in params]\n\n if critical_state:\n critical_state = utils.coerce_to_list(critical_state)\n body[\"critical_state\"] = make_list_of_ids(critical_state)\n if warning_state:\n warning_state = utils.coerce_to_list(warning_state)\n body[\"warning_state\"] = make_list_of_ids(warning_state)\n if ok_state:\n ok_state = utils.coerce_to_list(ok_state)\n body[\"ok_state\"] = make_list_of_ids(ok_state)\n resp, resp_body = self.api.method_post(uri, body=body)\n return self.get(resp.headers[\"x-object-id\"])"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_metric_data_points(self, metric, start, end, points=None,\n resolution=None, stats=None):\n \"\"\"\n Returns the data points for a given metric for the given period. The\n 'start' and 'end' times must be specified; they can be be either Python\n date/datetime values, or a Unix timestamp.\n\n The 'points' parameter represents the number of points to return. The\n 'resolution' parameter represents the granularity of the data. You must\n specify either 'points' or 'resolution'. The allowed values for\n resolution are:\n FULL\n MIN5\n MIN20\n MIN60\n MIN240\n MIN1440\n\n Finally, the 'stats' parameter specifies the stats you want returned.\n By default only the 'average' is returned. You omit this parameter,\n pass in a single value, or pass in a list of values. 
The allowed values\n are:\n average\n variance\n min\n max\n \"\"\"\n allowed_resolutions = (\"FULL\", \"MIN5\", \"MIN20\", \"MIN60\", \"MIN240\",\n \"MIN1440\")\n if not (points or resolution):\n raise exc.MissingMonitoringCheckGranularity(\"You must specify \"\n \"either the 'points' or 'resolution' parameter when \"\n \"fetching metrics.\")\n if resolution:\n if resolution.upper() not in allowed_resolutions:\n raise exc.InvalidMonitoringMetricsResolution(\"The specified \"\n \"resolution '%s' is not valid. The valid values are: \"\n \"%s.\" % (resolution, str(allowed_resolutions)))\n start_tm = utils.to_timestamp(start)\n end_tm = utils.to_timestamp(end)\n # NOTE: For some odd reason, the timestamps required for this must be\n # in milliseconds, instead of the UNIX standard for timestamps, which\n # is in seconds. So the values here are multiplied by 1000 to make it\n # work. If the API is ever corrected, the next two lines should be\n # removed. GitHub #176.\n start_tm *= 1000\n end_tm *= 1000\n qparms = []\n # Timestamps with fractional seconds currently cause a 408 (timeout)\n qparms.append(\"from=%s\" % int(start_tm))\n qparms.append(\"to=%s\" % int(end_tm))\n if points:\n qparms.append(\"points=%s\" % points)\n if resolution:\n qparms.append(\"resolution=%s\" % resolution.upper())\n if stats:\n stats = utils.coerce_to_list(stats)\n for stat in stats:\n qparms.append(\"select=%s\" % stat)\n qparm = \"&\".join(qparms)\n uri = \"/%s/%s/plot?%s\" % (self.uri_base, metric, qparm)\n try:\n resp, resp_body = self.api.method_get(uri)\n except exc.BadRequest as e:\n msg = e.message\n dtls = e.details\n if msg.startswith(\"Validation error\"):\n raise exc.InvalidMonitoringMetricsRequest(\"Your request was \"\n \"invalid: '%s'\" % dtls)\n else:\n raise\n return resp_body[\"values\"]", "response": "This method returns the data points for a given metric for a given period."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncreating an alarm that 
binds the check on the given entity with the given notification plan.", "response": "def create(self, check, notification_plan, criteria=None,\n disabled=False, label=None, name=None, metadata=None):\n \"\"\"\n Creates an alarm that binds the check on the given entity with a\n notification plan.\n\n Note that the 'criteria' parameter, if supplied, should be a string\n representing the DSL for describing alerting conditions and their\n output states. Pyrax does not do any validation of these criteria\n statements; it is up to you as the developer to understand the language\n and correctly form the statement. This alarm language is documented\n online in the Cloud Monitoring section of http://docs.rackspace.com.\n \"\"\"\n uri = \"/%s\" % self.uri_base\n body = {\"check_id\": utils.get_id(check),\n \"notification_plan_id\": utils.get_id(notification_plan),\n }\n if criteria:\n body[\"criteria\"] = criteria\n if disabled is not None:\n body[\"disabled\"] = disabled\n label_name = label or name\n if label_name:\n body[\"label\"] = label_name\n if metadata:\n body[\"metadata\"] = metadata\n resp, resp_body = self.api.method_post(uri, body=body)\n if resp.status_code == 201:\n alarm_id = resp.headers[\"x-object-id\"]\n return self.get(alarm_id)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nupdating an existing alarm.", "response": "def update(self, alarm, criteria=None, disabled=False, label=None,\n name=None, metadata=None):\n \"\"\"\n Updates an existing alarm. 
See the comments on the 'create()' method\n regarding the criteria parameter.\n \"\"\"\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(alarm))\n body = {}\n if criteria:\n body[\"criteria\"] = criteria\n if disabled is not None:\n body[\"disabled\"] = disabled\n label_name = label or name\n if label_name:\n body[\"label\"] = label_name\n if metadata:\n body[\"metadata\"] = metadata\n resp, resp_body = self.api.method_put(uri, body=body)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a check on the specified entity.", "response": "def create_check(self, label=None, name=None, check_type=None,\n details=None, disabled=False, metadata=None,\n monitoring_zones_poll=None, timeout=None, period=None,\n target_alias=None, target_hostname=None, target_receiver=None,\n test_only=False, include_debug=False):\n \"\"\"\n Creates a check on the entity with the specified attributes. The\n 'details' parameter should be a dict with the keys as the option name,\n and the value as the desired setting.\n\n If the 'test_only' parameter is True, then the check is not created;\n instead, the check is run and the results of the test run returned. 
If\n 'include_debug' is True, additional debug information is returned.\n According to the current Cloud Monitoring docs:\n \"Currently debug information is only available for the\n remote.http check and includes the response body.\"\n \"\"\"\n if details is None:\n raise exc.MissingMonitoringCheckDetails(\"The required 'details' \"\n \"parameter was not passed to the create_check() method.\")\n ctype = utils.get_id(check_type)\n is_remote = ctype.startswith(\"remote\")\n monitoring_zones_poll = utils.coerce_to_list(monitoring_zones_poll)\n monitoring_zones_poll = [utils.get_id(mzp)\n for mzp in monitoring_zones_poll]\n # only require monitoring_zones and targets for remote checks\n if is_remote:\n if not monitoring_zones_poll:\n raise exc.MonitoringZonesPollMissing(\"You must specify the \"\n \"'monitoring_zones_poll' parameter for remote checks.\")\n if not (target_alias or target_hostname):\n raise exc.MonitoringCheckTargetNotSpecified(\"You must \"\n \"specify either the 'target_alias' or 'target_hostname' \"\n \"when creating a remote check.\")\n body = {\"label\": label or name,\n \"details\": details,\n \"disabled\": disabled,\n \"type\": utils.get_id(check_type),\n }\n params = (\"monitoring_zones_poll\", \"timeout\", \"period\",\n \"target_alias\", \"target_hostname\", \"target_receiver\")\n body = _params_to_dict(params, body, locals())\n if test_only:\n uri = \"/%s/test-check\" % self.uri_base\n if include_debug:\n uri = \"%s?debug=true\" % uri\n else:\n uri = \"/%s\" % self.uri_base\n try:\n resp = self.api.method_post(uri, body=body)[0]\n except exc.BadRequest as e:\n msg = e.message\n dtls = e.details\n match = _invalid_key_pat.match(msg)\n if match:\n missing = match.groups()[0].replace(\"details.\", \"\")\n if missing in details:\n errmsg = \"\".join([\"The value passed for '%s' in the \",\n \"details parameter is not valid.\"]) % missing\n else:\n errmsg = \"\".join([\"The required value for the '%s' \",\n \"setting is missing from the 'details' 
\",\n \"parameter.\"]) % missing\n utils.update_exc(e, errmsg)\n raise e\n else:\n if msg == \"Validation error\":\n # Info is in the 'details'\n raise exc.InvalidMonitoringCheckDetails(\"Validation \"\n \"failed. Error: '%s'.\" % dtls)\n # its something other than validation error; probably\n # limits exceeded, but raise it instead of failing silently\n raise e\n else:\n if resp.status_code == 201:\n check_id = resp.headers[\"x-object-id\"]\n return self.get(check_id)\n # don't fail silently here either; raise an error\n # if we get an unexpected response code\n raise exc.ClientException(\"Unknown response code creating check;\"\n \" expected 201, got %s\" % resp.status_code)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef find_all_checks(self, **kwargs):\n found = []\n searches = kwargs.items()\n for obj in self.list():\n try:\n if all(getattr(obj, attr) == value\n for (attr, value) in searches):\n found.append(obj)\n except AttributeError:\n continue\n return found", "response": "Finds all checks for a given entity with attributes matching\n ."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a dictionary of data optionally filtered for a given entity.", "response": "def list(self, entity=None):\n \"\"\"\n Returns a dictionary of data, optionally filtered for a given entity.\n \"\"\"\n uri = \"/%s\" % self.uri_base\n if entity:\n uri = \"%s?entityId=%s\" % (uri, utils.get_id(entity))\n resp, resp_body = self._list(uri, return_raw=True)\n return resp_body"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncreate the dict required to create the resource.", "response": "def _create_body(self, name, label=None, agent=None, ip_addresses=None,\n metadata=None):\n \"\"\"\n Used to create the dict required to create various resources. 
Accepts\n either 'label' or 'name' as the keyword parameter for the label\n attribute for entities.\n \"\"\"\n label = label or name\n if ip_addresses is not None:\n body = {\"label\": label}\n if ip_addresses:\n body[\"ip_addresses\"] = ip_addresses\n if agent:\n body[\"agent_id\"] = utils.get_id(agent)\n if metadata:\n body[\"metadata\"] = metadata\n return body"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update_entity(self, entity, agent=None, metadata=None):\n body = {}\n if agent:\n body[\"agent_id\"] = utils.get_id(agent)\n if metadata:\n body[\"metadata\"] = metadata\n if body:\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(entity))\n resp, body = self.api.method_put(uri, body=body)", "response": "Updates the specified entity s attributes with the supplied parameters."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreloading the check with its current values.", "response": "def get(self):\n \"\"\"Reloads the check with its current values.\"\"\"\n new = self.manager.get(self)\n if new:\n self._add_details(new._info)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update(self, label=None, name=None, disabled=None, metadata=None,\n monitoring_zones_poll=None, timeout=None, period=None,\n target_alias=None, target_hostname=None, target_receiver=None):\n \"\"\"\n Updates an existing check with any of the parameters.\n \"\"\"\n self.manager.update(self, label=label, name=name,\n disabled=disabled, metadata=metadata,\n monitoring_zones_poll=monitoring_zones_poll, timeout=timeout,\n period=period, target_alias=target_alias,\n target_hostname=target_hostname,\n target_receiver=target_receiver)", "response": "Updates the properties of the current check."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list_metrics(self, limit=None, marker=None, 
return_next=False):\n return self._metrics_manager.list(limit=limit, marker=marker,\n return_next=return_next)", "response": "Returns a list of all the metrics associated with this check."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_metric_data_points(self, metric, start, end, points=None,\n resolution=None, stats=None):\n \"\"\"\n Returns the data points for a given metric for the given period. The\n 'start' and 'end' times must be specified; they can be be either Python\n date/datetime values, or a Unix timestamp.\n\n The 'points' parameter represents the number of points to return. The\n 'resolution' parameter represents the granularity of the data. You must\n specify either 'points' or 'resolution'. The allowed values for\n resolution are:\n FULL\n MIN5\n MIN20\n MIN60\n MIN240\n MIN1440\n\n Finally, the 'stats' parameter specifies the stats you want returned.\n By default only the 'average' is returned. You omit this parameter,\n pass in a single value, or pass in a list of values. 
The allowed values\n are:\n average\n variance\n min\n max\n \"\"\"\n return self._metrics_manager.get_metric_data_points(metric, start, end,\n points=points, resolution=resolution, stats=stats)", "response": "Returns the data points for a given metric for a given period."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create_alarm(self, notification_plan, criteria=None, disabled=False,\n label=None, name=None, metadata=None):\n \"\"\"\n Creates an alarm that binds this check with a notification plan.\n \"\"\"\n return self.manager.create_alarm(self.entity, self, notification_plan,\n criteria=criteria, disabled=disabled, label=label, name=name,\n metadata=metadata)", "response": "Creates an alarm that binds this check with a notification plan."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nfetching the current state of the alarm from the API and updates the object with the new state.", "response": "def get(self):\n \"\"\"\n Fetches the current state of the alarm from the API and updates the\n object.\n \"\"\"\n new_alarm = self.entity.get_alarm(self)\n if new_alarm:\n self._add_details(new_alarm._info)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncreate the Manager instances to handle monitoring.", "response": "def _configure_manager(self):\n \"\"\"\n Creates the Manager instances to handle monitoring.\n \"\"\"\n self._entity_manager = CloudMonitorEntityManager(self,\n uri_base=\"entities\", resource_class=CloudMonitorEntity,\n response_key=None, plural_response_key=None)\n self._check_type_manager = _PaginationManager(self,\n uri_base=\"check_types\", resource_class=CloudMonitorCheckType,\n response_key=None, plural_response_key=None)\n self._monitoring_zone_manager = BaseManager(self,\n uri_base=\"monitoring_zones\", resource_class=CloudMonitorZone,\n response_key=None, plural_response_key=None)\n self._notification_manager = 
CloudMonitorNotificationManager(self,\n uri_base=\"notifications\",\n resource_class=CloudMonitorNotification,\n response_key=None, plural_response_key=None)\n self._notification_plan_manager = CloudMonitorNotificationPlanManager(\n self, uri_base=\"notification_plans\",\n resource_class=CloudMonitorNotificationPlan,\n response_key=None, plural_response_key=None)\n self._changelog_manager = _EntityFilteringManger(self,\n uri_base=\"changelogs/alarms\", resource_class=None,\n response_key=None, plural_response_key=None)\n self._overview_manager = _EntityFilteringManger(self,\n uri_base=\"views/overview\", resource_class=None,\n response_key=\"value\", plural_response_key=None)\n self._token_manager = CloudMonitorTokenManager(self)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nupdating the entity with the given entity.", "response": "def update_entity(self, entity, agent=None, metadata=None):\n \"\"\"\n Only the agent_id and metadata are able to be updated via the API.\n \"\"\"\n self._entity_manager.update_entity(entity, agent=agent,\n metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nupdate an existing check in the given object.", "response": "def update_check(self, entity, check, label=None, name=None, disabled=None,\n metadata=None, monitoring_zones_poll=None, timeout=None,\n period=None, target_alias=None, target_hostname=None,\n target_receiver=None):\n \"\"\"\n Updates an existing check with any of the parameters.\n \"\"\"\n entity.update_check(check, label=label, name=name, disabled=disabled,\n metadata=metadata, monitoring_zones_poll=monitoring_zones_poll,\n timeout=timeout, period=period, target_alias=target_alias,\n target_hostname=target_hostname,\n target_receiver=target_receiver)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list_metrics(self, entity, check, limit=None, marker=None,\n return_next=False):\n \"\"\"\n 
Returns a list of all the metrics associated with the specified check.\n \"\"\"\n return entity.list_metrics(check, limit=limit, marker=marker,\n return_next=return_next)", "response": "Returns a list of all the metrics associated with the specified check."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate a new notification for handling an alarm.", "response": "def create_notification(self, notification_type, label=None, name=None,\n details=None):\n \"\"\"\n Defines a notification for handling an alarm.\n \"\"\"\n return self._notification_manager.create(notification_type,\n label=label, name=name, details=details)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_notification_plan(self, label=None, name=None,\n critical_state=None, ok_state=None, warning_state=None):\n \"\"\"\n Creates a notification plan to be executed when a monitoring check\n triggers an alarm.\n \"\"\"\n return self._notification_plan_manager.create(label=label, name=name,\n critical_state=critical_state, ok_state=ok_state,\n warning_state=warning_state)", "response": "Creates a notification plan for a check\n ."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nupdates an existing alarm on the given entity.", "response": "def update_alarm(self, entity, alarm, criteria=None, disabled=False,\n label=None, name=None, metadata=None):\n \"\"\"\n Updates an existing alarm on the given entity.\n \"\"\"\n return entity.update_alarm(alarm, criteria=criteria, disabled=disabled,\n label=label, name=name, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a list of all the alarms created on the specified entity.", "response": "def list_alarms(self, entity, limit=None, marker=None, return_next=False):\n \"\"\"\n Returns a list of all the alarms created on the specified entity.\n \"\"\"\n 
return entity.list_alarms(limit=limit, marker=marker,\n return_next=return_next)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nhandles all the common functionality required for API calls Returns the response object and the body that will be used to make the API call.", "response": "def request(method, uri, *args, **kwargs):\n \"\"\"\n Handles all the common functionality required for API calls. Returns\n the resulting response object.\n\n Formats the request into a dict representing the headers\n and body that will be used to make the API call.\n \"\"\"\n req_method = req_methods[method.upper()]\n raise_exception = kwargs.pop(\"raise_exception\", True)\n raw_content = kwargs.pop(\"raw_content\", False)\n kwargs[\"headers\"] = kwargs.get(\"headers\", {})\n http_log_req(method, uri, args, kwargs)\n data = None\n if \"data\" in kwargs:\n # The 'data' kwarg is used when you don't want json encoding.\n data = kwargs.pop(\"data\")\n elif \"body\" in kwargs:\n if \"Content-Type\" not in kwargs[\"headers\"]:\n kwargs[\"headers\"][\"Content-Type\"] = \"application/json\"\n data = json.dumps(kwargs.pop(\"body\"))\n if data:\n resp = req_method(uri, data=data, **kwargs)\n else:\n resp = req_method(uri, **kwargs)\n if raw_content:\n body = resp.content\n else:\n try:\n body = resp.json()\n except ValueError:\n # No JSON in response\n body = resp.content\n http_log_resp(resp, body)\n if resp.status_code >= 400 and raise_exception:\n raise exc.from_response(resp, body)\n return resp, body"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nlogging the HTTP request to the logger.", "response": "def http_log_req(method, uri, args, kwargs):\n \"\"\"\n When pyrax.get_http_debug() is True, outputs the equivalent `curl`\n command for the API request being made.\n \"\"\"\n if not pyrax.get_http_debug():\n return\n string_parts = [\"curl -i -X %s\" % method]\n for element in args:\n string_parts.append(\"%s\" % element)\n 
for element in kwargs[\"headers\"]:\n header = \"-H '%s: %s'\" % (element, kwargs[\"headers\"][element])\n string_parts.append(header)\n string_parts.append(uri)\n log = logging.getLogger(\"pyrax\")\n log.debug(\"\\nREQ: %s\\n\" % \" \".join(string_parts))\n if \"body\" in kwargs:\n pyrax._logger.debug(\"REQ BODY: %s\\n\" % (kwargs[\"body\"]))\n if \"data\" in kwargs:\n pyrax._logger.debug(\"REQ DATA: %s\\n\" % (kwargs[\"data\"]))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nlogging the response to the log.", "response": "def http_log_resp(resp, body):\n \"\"\"\n When pyrax.get_http_debug() is True, outputs the response received\n from the API request.\n \"\"\"\n if not pyrax.get_http_debug():\n return\n log = logging.getLogger(\"pyrax\")\n log.debug(\"RESP: %s\\n%s\", resp, resp.headers)\n if body:\n log.debug(\"RESP BODY: %s\", body)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nassures that a Container or a name of a container is passed.", "response": "def assure_container(fnc):\n \"\"\"\n Assures that whether a Container or a name of a container is passed, a\n Container object is available.\n \"\"\"\n @wraps(fnc)\n def _wrapped(self, container, *args, **kwargs):\n if not isinstance(container, Container):\n # Must be the name\n container = self.get(container)\n return fnc(self, container, *args, **kwargs)\n return _wrapped"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a copy of the supplied dictionary with any keys that do not begin with the specified prefix accordingly.", "response": "def _massage_metakeys(dct, prfx):\n \"\"\"\n Returns a copy of the supplied dictionary, prefixing any keys that do\n not begin with the specified prefix accordingly.\n \"\"\"\n lowprefix = prfx.lower()\n ret = {}\n for k, v in list(dct.items()):\n if not k.lower().startswith(lowprefix):\n k = \"%s%s\" % (prfx, k)\n ret[k] = v\n return ret"} {"SOURCE": 
"codesearchnet", "instruction": "Write a Python 3 function for\nreturning the size of a file - like object.", "response": "def get_file_size(fileobj):\n \"\"\"\n Returns the size of a file-like object.\n \"\"\"\n currpos = fileobj.tell()\n fileobj.seek(0, 2)\n total_size = fileobj.tell()\n fileobj.seek(currpos)\n return total_size"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _set_cdn_defaults(self):\n if self._cdn_enabled is FAULT:\n self._cdn_enabled = False\n self._cdn_uri = None\n self._cdn_ttl = DEFAULT_CDN_TTL\n self._cdn_ssl_uri = None\n self._cdn_streaming_uri = None\n self._cdn_ios_uri = None\n self._cdn_log_retention = False", "response": "Sets all the CDN - related attributes to default values."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _fetch_cdn_data(self):\n if self._cdn_enabled is FAULT:\n headers = self.manager.fetch_cdn_data(self)\n else:\n headers = {}\n # Set defaults in case not all headers are present.\n self._set_cdn_defaults()\n if not headers:\n # Not CDN enabled; return\n return\n else:\n self._cdn_enabled = True\n for key, value in headers.items():\n low_key = key.lower()\n if low_key == \"x-cdn-uri\":\n self._cdn_uri = value\n elif low_key == \"x-ttl\":\n self._cdn_ttl = int(value)\n elif low_key == \"x-cdn-ssl-uri\":\n self._cdn_ssl_uri = value\n elif low_key == \"x-cdn-streaming-uri\":\n self._cdn_streaming_uri = value\n elif low_key == \"x-cdn-ios-uri\":\n self._cdn_ios_uri = value\n elif low_key == \"x-log-retention\":\n self._cdn_log_retention = (value == \"True\")", "response": "Fetches the container s CDN data from the CDN service."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef set_metadata(self, metadata, clear=False, prefix=None):\n return self.manager.set_metadata(self, metadata, clear=clear,\n prefix=prefix)", "response": "Sets the container s metadata."} 
{"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef purge_cdn_object(self, obj, email_addresses=None):\n return self.object_manager.purge(obj, email_addresses=email_addresses)", "response": "Removes an object from public access before the TTL expires."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_object(self, item):\n if isinstance(item, six.string_types):\n item = self.object_manager.get(item)\n return item", "response": "Returns a StorageObject matching the specified item."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nlisting the objects in this container using the parameters to control the marker limit and delimiter end_marker and return_raw.", "response": "def list(self, marker=None, limit=None, prefix=None, delimiter=None,\n end_marker=None, full_listing=False, return_raw=False):\n \"\"\"\n List the objects in this container, using the parameters to control the\n number and content of objects. Note that this is limited by the\n absolute request limits of Swift (currently 10,000 objects). If you\n need to list all objects in the container, use the `list_all()` method\n instead.\n \"\"\"\n if full_listing:\n return self.list_all(prefix=prefix)\n else:\n return self.object_manager.list(marker=marker, limit=limit,\n prefix=prefix, delimiter=delimiter, end_marker=end_marker,\n return_raw=return_raw)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns a list of the names of all the objects in this container.", "response": "def list_object_names(self, marker=None, limit=None, prefix=None,\n delimiter=None, end_marker=None, full_listing=False):\n \"\"\"\n Returns a list of the names of all the objects in this container. 
The\n same pagination parameters apply as in self.list().\n \"\"\"\n if full_listing:\n objects = self.list_all(prefix=prefix)\n else:\n objects = self.list(marker=marker, limit=limit, prefix=prefix,\n delimiter=delimiter, end_marker=end_marker)\n return [obj.name for obj in objects]"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create(self, file_or_path=None, data=None, obj_name=None,\n content_type=None, etag=None, content_encoding=None,\n content_length=None, ttl=None, chunked=False, metadata=None,\n chunk_size=None, headers=None, return_none=False):\n \"\"\"\n Creates or replaces a storage object in this container.\n\n The content of the object can either be a stream of bytes (`data`), or\n a file on disk (`file_or_path`). The disk file can be either an open\n file-like object, or an absolute path to the file on disk.\n\n When creating object from a data stream, you must specify the name of\n the object to be created in the container via the `obj_name` parameter.\n When working with a file, though, if no `obj_name` value is specified,\n the file`s name will be used.\n\n You may optionally set the `content_type` and `content_encoding`\n parameters; pyrax will create the appropriate headers when the object\n is stored. If no `content_type` is specified, the object storage system\n will make an intelligent guess based on the content of the object.\n\n If the size of the file is known, it can be passed as `content_length`.\n\n If you wish for the object to be temporary, specify the time it should\n be stored in seconds in the `ttl` parameter. If this is specified, the\n object will be deleted after that number of seconds.\n\n If you wish to store a stream of data (i.e., where you don't know the\n total size in advance), set the `chunked` parameter to True, and omit\n the `content_length` and `etag` parameters. 
This allows the data to be\n streamed to the object in the container without having to be written to\n disk first.\n \"\"\"\n return self.object_manager.create(file_or_path=file_or_path,\n data=data, obj_name=obj_name, content_type=content_type,\n etag=etag, content_encoding=content_encoding,\n content_length=content_length, ttl=ttl, chunked=chunked,\n metadata=metadata, chunk_size=chunk_size, headers=headers,\n return_none=return_none)", "response": "Creates or replaces a storage object in this container."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef store_object(self, obj_name, data, content_type=None, etag=None,\n content_encoding=None, ttl=None, return_none=False,\n headers=None, extra_info=None):\n \"\"\"\n Creates a new object in this container, and populates it with the given\n data. A StorageObject reference to the uploaded file will be returned,\n unless 'return_none' is set to True.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self.create(obj_name=obj_name, data=data,\n content_type=content_type, etag=etag,\n content_encoding=content_encoding, ttl=ttl,\n return_none=return_none, headers=headers)", "response": "Stores an object in this container."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef upload_file(self, file_or_path, obj_name=None, content_type=None,\n etag=None, return_none=False, content_encoding=None, ttl=None,\n content_length=None, headers=None):\n \"\"\"\n Uploads the specified file to this container. If no name is supplied,\n the file's name will be used. Either a file path or an open file-like\n object may be supplied. 
A StorageObject reference to the uploaded file\n will be returned, unless 'return_none' is set to True.\n\n You may optionally set the `content_type` and `content_encoding`\n parameters; pyrax will create the appropriate headers when the object\n is stored.\n\n If the size of the file is known, it can be passed as `content_length`.\n\n If you wish for the object to be temporary, specify the time it should\n be stored in seconds in the `ttl` parameter. If this is specified, the\n object will be deleted after that number of seconds.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self.create(file_or_path=file_or_path, obj_name=obj_name,\n content_type=content_type, etag=etag,\n content_encoding=content_encoding, headers=headers,\n content_length=content_length, ttl=ttl,\n return_none=return_none)", "response": "Uploads a file to this container."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfetches the object from storage.", "response": "def fetch(self, obj, include_meta=False, chunk_size=None, size=None,\n extra_info=None):\n \"\"\"\n Fetches the object from storage.\n\n If 'include_meta' is False, only the bytes representing the\n stored object are returned.\n\n Note: if 'chunk_size' is defined, you must fully read the object's\n contents before making another request.\n\n If 'size' is specified, only the first 'size' bytes of the object will\n be returned. If the object if smaller than 'size', the entire object is\n returned.\n\n When 'include_meta' is True, what is returned from this method is a\n 2-tuple:\n Element 0: a dictionary containing metadata about the file.\n Element 1: a stream of bytes representing the object's contents.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self.object_manager.fetch(obj, include_meta=include_meta,\n chunk_size=chunk_size, size=size)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nfetch a single object from the cache.", "response": "def fetch_object(self, obj_name, include_meta=False, chunk_size=None):\n \"\"\"\n Alias for self.fetch(); included for backwards compatibility\n \"\"\"\n return self.fetch(obj=obj_name, include_meta=include_meta,\n chunk_size=chunk_size)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ndownloading the specified object from storage and writes it to the specified directory.", "response": "def download(self, obj, directory, structure=True):\n \"\"\"\n Fetches the object from storage, and writes it to the specified\n directory. The directory must exist before calling this method.\n\n If the object name represents a nested folder structure, such as\n \"foo/bar/baz.txt\", that folder structure will be created in the target\n directory by default. If you do not want the nested folders to be\n created, pass `structure=False` in the parameters.\n \"\"\"\n return self.object_manager.download(obj, directory, structure=structure)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ndownloading an object from the cache.", "response": "def download_object(self, obj_name, directory, structure=True):\n \"\"\"\n Alias for self.download(); included for backwards compatibility\n \"\"\"\n return self.download(obj=obj_name, directory=directory,\n structure=structure)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef delete(self, del_objects=False):\n return self.manager.delete(self, del_objects=del_objects)", "response": "Deletes this Container. 
If the container contains objects, the\n command will fail unless 'del_objects' is passed as True. In that\n case, each object will be deleted first, and then the container."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef delete_object_in_seconds(self, obj, seconds, extra_info=None):\n return self.manager.delete_object_in_seconds(self, obj, seconds)", "response": "Deletes the specified object in the specified number of seconds."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndeleting all objects from this container.", "response": "def delete_all_objects(self, async_=False):\n \"\"\"\n Deletes all objects from this container.\n\n By default the call will block until all objects have been deleted. By\n passing True for the 'async_' parameter, this method will not block, and\n instead return an object that can be used to follow the progress of the\n deletion. When deletion is complete the bulk deletion object's\n 'results' attribute will be populated with the information returned\n from the API call. In synchronous mode this is the value that is\n returned when the call completes. It is a dictionary with the following\n keys:\n\n deleted - the number of objects deleted\n not_found - the number of objects not found\n status - the HTTP return status code. 
'200 OK' indicates success\n errors - a list of any errors returned by the bulk delete call\n \"\"\"\n nms = self.list_object_names(full_listing=True)\n return self.object_manager.delete_all_objects(nms, async_=async_)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef copy_object(self, obj, new_container, new_obj_name=None,\n content_type=None):\n \"\"\"\n Copies the object to the new container, optionally giving it a new name.\n If you copy to the same container, you must supply a different name.\n \"\"\"\n return self.manager.copy_object(self, obj, new_container,\n new_obj_name=new_obj_name, content_type=content_type)", "response": "Copies the object to the new container optionally giving it a new name."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nmove an object to a new container.", "response": "def move_object(self, obj, new_container, new_obj_name=None,\n new_reference=False, content_type=None, extra_info=None):\n \"\"\"\n Works just like copy_object, except that the source object is deleted\n after a successful copy.\n\n You can optionally change the content_type of the object by supplying\n that in the 'content_type' parameter.\n\n NOTE: any references to the original object will no longer be valid;\n you will have to get a reference to the new object by passing True for\n the 'new_reference' parameter. When this is True, a reference to the\n newly moved object is returned. Otherwise, the etag for the moved\n object is returned.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self.manager.move_object(self, obj, new_container,\n new_obj_name=new_obj_name, new_reference=new_reference,\n content_type=content_type)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef change_object_content_type(self, obj, new_ctype, guess=False):\n return self.manager.change_object_content_type(self, obj, new_ctype,\n guess=guess)", "response": "Copies an object to itself but applies a new content - type."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ngives a storage object in this container returns a temporary URL that can be used to access that object.", "response": "def get_temp_url(self, obj, seconds, method=\"GET\", key=None, cached=True):\n \"\"\"\n Given a storage object in this container, returns a URL that can be\n used to access that object. The URL will expire after `seconds`\n seconds.\n\n The only methods supported are GET and PUT. Anything else will raise an\n `InvalidTemporaryURLMethod` exception.\n\n If you have your Temporary URL key, you can pass it in directly and\n potentially save an API call to retrieve it. If you don't pass in the\n key, and don't wish to use any cached value, pass `cached=False`.\n \"\"\"\n return self.manager.get_temp_url(self, obj, seconds, method=method,\n key=key, cached=cached)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting the object s metadata to the given dictionary.", "response": "def set_object_metadata(self, obj, metadata, clear=False, extra_info=None,\n prefix=None):\n \"\"\"\n Accepts a dictionary of metadata key/value pairs and updates the\n specified object metadata with them.\n\n If 'clear' is True, any existing metadata is deleted and only the\n passed metadata is retained. 
Otherwise, the values passed here update\n the object's metadata.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n\n By default, the standard object metadata prefix ('X-Object-Meta-') is\n prepended to the header name if it isn't present. For non-standard\n headers, you must include a non-None prefix, such as an empty string.\n \"\"\"\n return self.object_manager.set_metadata(obj, metadata, clear=clear,\n prefix=prefix)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef list_subdirs(self, marker=None, limit=None, prefix=None, delimiter=None,\n full_listing=False):\n \"\"\"\n Return a list of the namesrepresenting the pseudo-subdirectories in\n this container. You can use the marker and limit params to handle\n pagination, and the prefix param to filter the objects returned. The\n delimiter param is there for backwards compatibility only, as the call\n requires the delimiter to be '/'.\n \"\"\"\n return self.manager.list_subdirs(self, marker=marker, limit=limit,\n prefix=prefix, delimiter=delimiter, full_listing=full_listing)", "response": "Return a list of the pseudo - directories in the infrastructure."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list(self, limit=None, marker=None, end_marker=None, prefix=None):\n uri = \"/%s\" % self.uri_base\n qs = utils.dict_to_qs({\"marker\": marker, \"limit\": limit,\n \"prefix\": prefix, \"end_marker\": end_marker})\n if qs:\n uri = \"%s?%s\" % (uri, qs)\n resp, resp_body = self.api.method_get(uri)\n return [Container(self, res, loaded=False)\n for res in resp_body if res]", "response": "Returns a list of all the available container resources."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get(self, 
container):\n name = utils.get_name(container)\n uri = \"/%s\" % name\n resp, resp_body = self.api.method_head(uri)\n hdrs = resp.headers\n data = {\"total_bytes\": int(hdrs.get(\"x-container-bytes-used\", \"0\")),\n \"object_count\": int(hdrs.get(\"x-container-object-count\", \"0\")),\n \"name\": name}\n return Container(self, data, loaded=False)", "response": "Returns a Container object matching the specified container name."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a new container with the specified name and metadata.", "response": "def create(self, name, metadata=None, prefix=None, *args, **kwargs):\n \"\"\"\n Creates a new container, and returns a Container object that represents\n that contianer. If a container by the same name already exists, no\n exception is raised; instead, a reference to that existing container is\n returned.\n \"\"\"\n uri = \"/%s\" % name\n headers = {}\n if prefix is None:\n prefix = CONTAINER_META_PREFIX\n if metadata:\n metadata = _massage_metakeys(metadata, prefix)\n headers = metadata\n resp, resp_body = self.api.method_put(uri, headers=headers)\n if resp.status_code in (201, 202):\n hresp, hresp_body = self.api.method_head(uri)\n num_obj = int(hresp.headers.get(\"x-container-object-count\", \"0\"))\n num_bytes = int(hresp.headers.get(\"x-container-bytes-used\", \"0\"))\n cont_info = {\"name\": name, \"object_count\": num_obj,\n \"total_bytes\": num_bytes}\n return Container(self, cont_info)\n elif resp.status_code == 400:\n raise exc.ClientException(\"Container creation failed: %s\" %\n resp_body)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes the specified container.", "response": "def delete(self, container, del_objects=False):\n \"\"\"\n Deletes the specified container. If the container contains objects, the\n command will fail unless 'del_objects' is passed as True. 
In that case,\n each object will be deleted first, and then the container.\n \"\"\"\n if del_objects:\n nms = self.list_object_names(container, full_listing=True)\n self.api.bulk_delete(container, nms, async_=False)\n uri = \"/%s\" % utils.get_name(container)\n resp, resp_body = self.api.method_delete(uri)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns the CDN - enabled data for the specified container.", "response": "def fetch_cdn_data(self, container):\n \"\"\"\n Returns a dict containing the CDN information for the specified\n container. If the container is not CDN-enabled, returns an empty dict.\n \"\"\"\n name = utils.get_name(container)\n uri = \"/%s\" % name\n try:\n resp, resp_body = self.api.cdn_request(uri, \"HEAD\")\n except exc.NotCDNEnabled:\n return {}\n return resp.headers"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_headers(self, container):\n uri = \"/%s\" % utils.get_name(container)\n resp, resp_body = self.api.method_head(uri)\n return resp.headers", "response": "Returns the headers for the specified container."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_account_metadata(self, prefix=None):\n headers = self.get_account_headers()\n if prefix is None:\n prefix = ACCOUNT_META_PREFIX\n low_prefix = prefix.lower()\n ret = {}\n for hkey, hval in list(headers.items()):\n lowkey = hkey.lower()\n if lowkey.startswith(low_prefix):\n cleaned = hkey.replace(low_prefix, \"\").replace(\"-\", \"_\")\n ret[cleaned] = hval\n return ret", "response": "Returns a dictionary containing metadata about the account."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsets the metadata for the specified account.", "response": "def set_account_metadata(self, metadata, clear=False, prefix=None):\n \"\"\"\n Accepts a dictionary of metadata 
key/value pairs and updates the\n account metadata with them.\n\n If 'clear' is True, any existing metadata is deleted and only the\n passed metadata is retained. Otherwise, the values passed here update\n the account's metadata.\n\n By default, the standard account metadata prefix ('X-Account-Meta-') is\n prepended to the header name if it isn't present. For non-standard\n headers, you must include a non-None prefix, such as an empty string.\n \"\"\"\n # Add the metadata prefix, if needed.\n if prefix is None:\n prefix = ACCOUNT_META_PREFIX\n massaged = _massage_metakeys(metadata, prefix)\n new_meta = {}\n if clear:\n curr_meta = self.get_account_metadata(prefix=prefix)\n for ckey in curr_meta:\n new_meta[ckey] = \"\"\n new_meta = _massage_metakeys(new_meta, prefix)\n utils.case_insensitive_update(new_meta, massaged)\n uri = \"/\"\n resp, resp_body = self.api.method_post(uri, headers=new_meta)\n return 200 <= resp.status_code <= 299"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nremoving all metadata matching the specified prefix from the account.", "response": "def delete_account_metadata(self, prefix=None):\n \"\"\"\n Removes all metadata matching the specified prefix from the account.\n\n By default, the standard account metadata prefix ('X-Account-Meta-') is\n prepended to the header name if it isn't present. 
For non-standard\n headers, you must include a non-None prefix, such as an empty string.\n \"\"\"\n # Add the metadata prefix, if needed.\n if prefix is None:\n prefix = ACCOUNT_META_PREFIX\n curr_meta = self.get_account_metadata(prefix=prefix)\n for ckey in curr_meta:\n curr_meta[ckey] = \"\"\n new_meta = _massage_metakeys(curr_meta, prefix)\n uri = \"/\"\n resp, resp_body = self.api.method_post(uri, headers=new_meta)\n return 200 <= resp.status_code <= 299"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning a dictionary containing the metadata for the container.", "response": "def get_metadata(self, container, prefix=None):\n \"\"\"\n Returns a dictionary containing the metadata for the container.\n \"\"\"\n headers = self.get_headers(container)\n if prefix is None:\n prefix = CONTAINER_META_PREFIX\n low_prefix = prefix.lower()\n ret = {}\n for hkey, hval in list(headers.items()):\n if hkey.lower().startswith(low_prefix):\n cleaned = hkey.replace(low_prefix, \"\").replace(\"-\", \"_\")\n ret[cleaned] = hval\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nsets the metadata for the specified container.", "response": "def set_metadata(self, container, metadata, clear=False, prefix=None):\n \"\"\"\n Accepts a dictionary of metadata key/value pairs and updates the\n specified container metadata with them.\n\n If 'clear' is True, any existing metadata is deleted and only the\n passed metadata is retained. Otherwise, the values passed here update\n the container's metadata.\n\n By default, the standard container metadata prefix\n ('X-Container-Meta-') is prepended to the header name if it isn't\n present. 
For non-standard headers, you must include a non-None prefix,\n such as an empty string.\n \"\"\"\n # Add the metadata prefix, if needed.\n if prefix is None:\n prefix = CONTAINER_META_PREFIX\n massaged = _massage_metakeys(metadata, prefix)\n new_meta = {}\n if clear:\n curr_meta = self.api.get_container_metadata(container,\n prefix=prefix)\n for ckey in curr_meta:\n new_meta[ckey] = \"\"\n utils.case_insensitive_update(new_meta, massaged)\n name = utils.get_name(container)\n uri = \"/%s\" % name\n resp, resp_body = self.api.method_post(uri, headers=new_meta)\n return 200 <= resp.status_code <= 299"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef remove_metadata_key(self, container, key):\n meta_dict = {key: \"\"}\n return self.set_metadata(container, meta_dict)", "response": "Removes the specified key from the container s metadata."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nremoves all of the metadata for the specified container.", "response": "def delete_metadata(self, container, prefix=None):\n \"\"\"\n Removes all of the container's metadata.\n\n By default, all metadata beginning with the standard container metadata\n prefix ('X-Container-Meta-') is removed. 
If you wish to remove all\n metadata beginning with a different prefix, you must specify that\n prefix.\n \"\"\"\n # Add the metadata prefix, if needed.\n if prefix is None:\n prefix = CONTAINER_META_PREFIX\n new_meta = {}\n curr_meta = self.get_metadata(container, prefix=prefix)\n for ckey in curr_meta:\n new_meta[ckey] = \"\"\n uri = \"/%s\" % utils.get_name(container)\n resp, resp_body = self.api.method_post(uri, headers=new_meta)\n return 200 <= resp.status_code <= 299"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the CDN metadata for the specified container.", "response": "def get_cdn_metadata(self, container):\n \"\"\"\n Returns a dictionary containing the CDN metadata for the container. If\n the container does not exist, a NotFound exception is raised. If the\n container exists, but is not CDN-enabled, a NotCDNEnabled exception is\n raised.\n \"\"\"\n uri = \"%s/%s\" % (self.uri_base, utils.get_name(container))\n resp, resp_body = self.api.cdn_request(uri, \"HEAD\")\n ret = dict(resp.headers)\n # Remove non-CDN headers\n ret.pop(\"content-length\", None)\n ret.pop(\"content-type\", None)\n ret.pop(\"date\", None)\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsets the CDN metadata for the specified container.", "response": "def set_cdn_metadata(self, container, metadata):\n \"\"\"\n Accepts a dictionary of metadata key/value pairs and updates\n the specified container metadata with them.\n\n NOTE: arbitrary metadata headers are not allowed. 
The only metadata\n you can update are: X-Log-Retention, X-CDN-enabled, and X-TTL.\n \"\"\"\n allowed = (\"x-log-retention\", \"x-cdn-enabled\", \"x-ttl\")\n hdrs = {}\n bad = []\n for mkey, mval in six.iteritems(metadata):\n if mkey.lower() not in allowed:\n bad.append(mkey)\n continue\n hdrs[mkey] = str(mval)\n if bad:\n raise exc.InvalidCDNMetadata(\"The only CDN metadata you can \"\n \"update are: X-Log-Retention, X-CDN-enabled, and X-TTL. \"\n \"Received the following illegal item(s): %s\" %\n \", \".join(bad))\n uri = \"%s/%s\" % (self.uri_base, utils.get_name(container))\n resp, resp_body = self.api.cdn_request(uri, \"POST\", headers=hdrs)\n return resp"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ngives a storage object in a container returns a temporary URL that can be used to access that object.", "response": "def get_temp_url(self, container, obj, seconds, method=\"GET\", key=None,\n cached=True):\n \"\"\"\n Given a storage object in a container, returns a URL that can be used\n to access that object. The URL will expire after `seconds` seconds.\n\n The only methods supported are GET and PUT. Anything else will raise\n an `InvalidTemporaryURLMethod` exception.\n\n If you have your Temporary URL key, you can pass it in directly and\n potentially save an API call to retrieve it. If you don't pass in the\n key, and don't wish to use any cached value, pass `cached=False`.\n \"\"\"\n if not key:\n key = self.api.get_temp_url_key(cached=cached)\n if not key:\n raise exc.MissingTemporaryURLKey(\"You must set the key for \"\n \"Temporary URLs before you can generate them. 
This is \"\n \"done via the `set_temp_url_key()` method.\")\n cname = utils.get_name(container)\n oname = utils.get_name(obj)\n mod_method = method.upper().strip()\n if mod_method not in (\"GET\", \"PUT\"):\n raise exc.InvalidTemporaryURLMethod(\"Method must be either 'GET' \"\n \"or 'PUT'; received '%s'.\" % method)\n mgt_url = self.api.management_url\n mtch = re.search(r\"/v\\d/\", mgt_url)\n start = mtch.start()\n base_url = mgt_url[:start]\n path_parts = (mgt_url[start:], cname, oname)\n cleaned = (part.strip(\"/\\\\\") for part in path_parts)\n pth = \"/%s\" % \"/\".join(cleaned)\n expires = int(time.time() + int(seconds))\n try:\n key = key.encode(\"ascii\")\n hmac_body = b'\\n'.join([\n mod_method.encode(\"ascii\"),\n six.text_type(expires).encode(\"ascii\"),\n pth.encode(\"ascii\")])\n except UnicodeEncodeError:\n raise exc.UnicodePathError(\"Due to a bug in Python, the TempURL \"\n \"function only works with ASCII object paths.\")\n sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()\n temp_url = \"%s%s?temp_url_sig=%s&temp_url_expires=%s\" % (base_url, pth,\n sig, expires)\n return temp_url"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list_containers_info(self, limit=None, marker=None):\n uri = \"\"\n qs = utils.dict_to_qs({\"limit\": limit, \"marker\": marker})\n if qs:\n uri += \"%s?%s\" % (uri, qs)\n resp, resp_body = self.api.method_get(uri)\n return resp_body", "response": "Returns a list of info on Containers.\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list_public_containers(self):\n resp, resp_body = self.api.cdn_request(\"\", \"GET\")\n return [cont[\"name\"] for cont in resp_body]", "response": "Returns a list of the names of all CDN - enabled containers."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef make_public(self, container, ttl=None):\n 
return self._set_cdn_access(container, public=True, ttl=ttl)", "response": "Enables CDN access for the specified container and optionally sets the\n TTL for the container."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nset the CDN access for the specified container.", "response": "def _set_cdn_access(self, container, public, ttl=None):\n \"\"\"\n Enables or disables CDN access for the specified container, and\n optionally sets the TTL for the container when enabling access.\n \"\"\"\n headers = {\"X-Cdn-Enabled\": \"%s\" % public}\n if public and ttl:\n headers[\"X-Ttl\"] = ttl\n self.api.cdn_request(\"/%s\" % utils.get_name(container), method=\"PUT\",\n headers=headers)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn the status of the CDN log retention for the specified container.", "response": "def get_cdn_log_retention(self, container):\n \"\"\"\n Returns the status of the setting for CDN log retention for the\n specified container.\n \"\"\"\n resp, resp_body = self.api.cdn_request(\"/%s\" %\n utils.get_name(container), method=\"HEAD\")\n return resp.headers.get(\"x-log-retention\").lower() == \"true\""} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nsets whether CDN access logs for the specified container are collected and stored on Cloud Files.", "response": "def set_cdn_log_retention(self, container, enabled):\n \"\"\"\n Enables or disables whether CDN access logs for the specified container\n are collected and stored on Cloud Files.\n \"\"\"\n headers = {\"X-Log-Retention\": \"%s\" % enabled}\n self.api.cdn_request(\"/%s\" % utils.get_name(container), method=\"PUT\",\n headers=headers)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_container_streaming_uri(self, container):\n resp, resp_body = self.api.cdn_request(\"/%s\" %\n utils.get_name(container), method=\"HEAD\")\n 
return resp.headers.get(\"x-cdn-streaming-uri\")", "response": "Returns the URI for streaming content for the given container."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef set_web_index_page(self, container, page):\n headers = {\"X-Container-Meta-Web-Index\": \"%s\" % page}\n self.api.cdn_request(\"/%s\" % utils.get_name(container), method=\"POST\",\n headers=headers)", "response": "Sets the header indicating the index page in a container when creating a static website."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nremoves an object from CDN - enabled public access before the TTL expires.", "response": "def purge_cdn_object(self, container, obj, email_addresses=None):\n \"\"\"\n Removes a CDN-enabled object from public access before the TTL expires.\n Please note that there is a limit (at this time) of 25 such requests;\n if you need to purge more than that, you must contact support.\n\n If one or more email_addresses are included, an email confirming the\n purge is sent to each address.\n \"\"\"\n return container.purge_cdn_object(obj, email_addresses=email_addresses)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef list_objects(self, container, limit=None, marker=None, prefix=None,\n delimiter=None, end_marker=None, full_listing=False):\n \"\"\"\n Return a list of StorageObjects representing the objects in this\n container. You can use the marker, end_marker, and limit params to\n handle pagination, and the prefix and delimiter params to filter the\n objects returned. 
By default only the first 10,000 objects are\n returned; if you need to access more than that, set the 'full_listing'\n parameter to True.\n \"\"\"\n if full_listing:\n return container.list_all(prefix=prefix)\n return container.list(limit=limit, marker=marker, prefix=prefix,\n delimiter=delimiter, end_marker=end_marker)", "response": "List the objects in a container."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list_object_names(self, container, marker=None, limit=None, prefix=None,\n delimiter=None, end_marker=None, full_listing=False):\n \"\"\"\n Return a list of then names of the objects in this container. You can\n use the marker, end_marker, and limit params to handle pagination, and\n the prefix and delimiter params to filter the objects returned. By\n default only the first 10,000 objects are returned; if you need to\n access more than that, set the 'full_listing' parameter to True.\n \"\"\"\n return container.list_object_names(marker=marker, limit=limit,\n prefix=prefix, delimiter=delimiter, end_marker=end_marker,\n full_listing=full_listing)", "response": "List the object names in this container."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns a list of all directories in the specified container.", "response": "def list_subdirs(self, container, marker=None, limit=None, prefix=None,\n delimiter=None, full_listing=False):\n \"\"\"\n Returns a list of StorageObjects representing the pseudo-subdirectories\n in the specified container. 
You can use the marker and limit params to\n handle pagination, and the prefix param to filter the objects returned.\n The 'delimiter' parameter is ignored, as the only meaningful value is\n '/'.\n \"\"\"\n mthd = container.list_all if full_listing else container.list\n objs = mthd(marker=marker, limit=limit, prefix=prefix, delimiter=\"/\",\n return_raw=True)\n sdirs = [obj for obj in objs if \"subdir\" in obj]\n mgr = container.object_manager\n return [StorageObject(mgr, sdir) for sdir in sdirs]"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndownload the specified object from storage and writes it to the specified directory.", "response": "def download_object(self, container, obj, directory, structure=True):\n \"\"\"\n Fetches the object from storage, and writes it to the specified\n directory. The directory must exist before calling this method.\n\n If the object name represents a nested folder structure, such as\n \"foo/bar/baz.txt\", that folder structure will be created in the target\n directory by default. 
If you do not want the nested folders to be\n created, pass `structure=False` in the parameters.\n \"\"\"\n return container.download(obj, directory, structure=structure)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncopies the object to the new container optionally giving it a new name.", "response": "def copy_object(self, container, obj, new_container, new_obj_name=None,\n content_type=None):\n \"\"\"\n Copies the object to the new container, optionally giving it a new name.\n If you copy to the same container, you must supply a different name.\n\n Returns the etag of the newly-copied object.\n\n You can optionally change the content_type of the object by supplying\n that in the 'content_type' parameter.\n \"\"\"\n nm = new_obj_name or utils.get_name(obj)\n uri = \"/%s/%s\" % (utils.get_name(new_container), nm)\n copy_from = \"/%s/%s\" % (utils.get_name(container), utils.get_name(obj))\n headers = {\"X-Copy-From\": copy_from,\n \"Content-Length\": \"0\"}\n if content_type:\n headers[\"Content-Type\"] = content_type\n resp, resp_body = self.api.method_put(uri, headers=headers)\n return resp.headers.get(\"etag\")"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nmoves an object from one container to another.", "response": "def move_object(self, container, obj, new_container, new_obj_name=None,\n new_reference=False, content_type=None):\n \"\"\"\n Works just like copy_object, except that the source object is deleted\n after a successful copy.\n\n You can optionally change the content_type of the object by supplying\n that in the 'content_type' parameter.\n\n NOTE: any references to the original object will no longer be valid;\n you will have to get a reference to the new object by passing True for\n the 'new_reference' parameter. When this is True, a reference to the\n newly moved object is returned. 
Otherwise, the etag for the moved\n object is returned.\n \"\"\"\n new_obj_etag = self.copy_object(container, obj, new_container,\n new_obj_name=new_obj_name, content_type=content_type)\n if not new_obj_etag:\n return\n # Copy succeeded; delete the original.\n self.delete_object(container, obj)\n if new_reference:\n nm = new_obj_name or utils.get_name(obj)\n return self.get_object(new_container, nm)\n return new_obj_etag"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncopying an object to itself but applies a new content - type.", "response": "def change_object_content_type(self, container, obj, new_ctype,\n guess=False):\n \"\"\"\n Copies object to itself, but applies a new content-type. The guess\n feature requires the container to be CDN-enabled. If not, then the\n content-type must be supplied. If using guess with a CDN-enabled\n container, new_ctype can be set to None. Failure during the put will\n result in an exception.\n \"\"\"\n cname = utils.get_name(container)\n oname = utils.get_name(obj)\n if guess and container.cdn_enabled:\n # Test against the CDN url to guess the content-type.\n obj_url = \"%s/%s\" % (container.cdn_uri, oname)\n new_ctype = mimetypes.guess_type(obj_url)[0]\n return self.copy_object(container, obj, container,\n content_type=new_ctype)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nset the object in the specified container to be deleted after the specified number of seconds.", "response": "def delete_object_in_seconds(self, cont, obj, seconds, extra_info=None):\n \"\"\"\n Sets the object in the specified container to be deleted after the\n specified number of seconds.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n meta = {\"X-Delete-After\": seconds}\n self.set_object_metadata(cont, obj, meta, clear=True, prefix=\"\")"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_object_metadata(self, container, obj, metadata, clear=False,\n extra_info=None, prefix=None):\n \"\"\"\n Accepts a dictionary of metadata key/value pairs and updates the\n specified object metadata with them.\n\n If 'clear' is True, any existing metadata is deleted and only the\n passed metadata is retained. Otherwise, the values passed here update\n the object's metadata.\n\n 'extra_info; is an optional dictionary which will be populated with\n 'status', 'reason', and 'headers' keys from the underlying swiftclient\n call.\n\n By default, the standard object metadata prefix ('X-Object-Meta-') is\n prepended to the header name if it isn't present. 
For non-standard\n headers, you must include a non-None prefix, such as an empty string.\n \"\"\"\n return container.set_object_metadata(obj, metadata, clear=clear,\n prefix=prefix)", "response": "Sets the object s metadata to the passed dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfetches the object from storage.", "response": "def get(self, include_meta=False, chunk_size=None):\n \"\"\"\n Fetches the object from storage.\n\n If 'include_meta' is False, only the bytes representing the\n file is returned.\n\n Note: if 'chunk_size' is defined, you must fully read the object's\n contents before making another request.\n\n When 'include_meta' is True, what is returned from this method is a\n 2-tuple:\n Element 0: a dictionary containing metadata about the file.\n Element 1: a stream of bytes representing the object's contents.\n \"\"\"\n return self.manager.fetch(obj=self, include_meta=include_meta,\n chunk_size=chunk_size)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndownload the object from storage and writes it to the specified directory.", "response": "def download(self, directory, structure=True):\n \"\"\"\n Fetches the object from storage, and writes it to the specified\n directory. The directory must exist before calling this method.\n\n If the object name represents a nested folder structure, such as\n \"foo/bar/baz.txt\", that folder structure will be created in the target\n directory by default. 
If you do not want the nested folders to be\n created, pass `structure=False` in the parameters.\n \"\"\"\n return self.manager.download(self, directory, structure=structure)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef copy(self, new_container, new_obj_name=None, extra_info=None):\n return self.container.copy_object(self, new_container,\n new_obj_name=new_obj_name)", "response": "Copies this object to a new container optionally giving it a new\n name."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmove this object to another container.", "response": "def move(self, new_container, new_obj_name=None, extra_info=None):\n \"\"\"\n Works just like copy_object, except that this object is deleted after a\n successful copy. This means that this storage_object reference will no\n longer be valid.\n \"\"\"\n return self.container.move_object(self, new_container,\n new_obj_name=new_obj_name)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef change_content_type(self, new_ctype, guess=False):\n self.container.change_object_content_type(self, new_ctype=new_ctype,\n guess=guess)", "response": "Copies the object to itself but applies a new content - type."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef remove_metadata_key(self, key, prefix=None):\n self.manager.remove_metadata_key(self, key, prefix=prefix)", "response": "Removes the specified key from the storage object s metadata."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a URL that can be used to access this object.", "response": "def get_temp_url(self, seconds, method=\"GET\"):\n \"\"\"\n Returns a URL that can be used to access this object. The URL will\n expire after `seconds` seconds.\n\n The only methods supported are GET and PUT. 
Anything else will raise\n an InvalidTemporaryURLMethod exception.\n \"\"\"\n return self.container.get_temp_url(self, seconds=seconds,\n method=method)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get(self, obj):\n name = utils.get_name(obj)\n uri = \"/%s/%s\" % (self.uri_base, name)\n resp, resp_body = self.api.method_head(uri)\n hdrs = resp.headers\n try:\n content_length = int(hdrs.get(\"content-length\"))\n except (TypeError, ValueError):\n content_length = None\n data = {\"name\": name,\n \"bytes\": content_length,\n \"content_type\": hdrs.get(\"content-type\"),\n \"hash\": hdrs.get(\"etag\"),\n \"last_modified\": hdrs.get(\"last-modified\"),\n \"timestamp\": hdrs.get(\"x-timestamp\"),\n }\n return StorageObject(self, data, loaded=True)", "response": "Gets the information about the specified object."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create(self, file_or_path=None, data=None, obj_name=None,\n content_type=None, etag=None, content_encoding=None,\n content_length=None, ttl=None, chunked=False, metadata=None,\n chunk_size=None, headers=None, return_none=False):\n \"\"\"\n Creates or replaces a storage object in this container.\n\n The content of the object can either be a stream of bytes (`data`), or\n a file on disk (`file_or_path`). The disk file can be either an open\n file-like object, or an absolute path to the file on disk.\n\n When creating object from a data stream, you must specify the name of\n the object to be created in the container via the `obj_name` parameter.\n When working with a file, though, if no `obj_name` value is specified,\n the file`s name will be used.\n\n You may optionally set the `content_type` and `content_encoding`\n parameters; pyrax will create the appropriate headers when the object\n is stored. 
If no `content_type` is specified, the object storage system\n will make an intelligent guess based on the content of the object.\n\n If the size of the file is known, it can be passed as `content_length`.\n\n If you wish for the object to be temporary, specify the time it should\n be stored in seconds in the `ttl` parameter. If this is specified, the\n object will be deleted after that number of seconds.\n\n If you wish to store a stream of data (i.e., where you don't know the\n total size in advance), set the `chunked` parameter to True, and omit\n the `content_length` and `etag` parameters. This allows the data to be\n streamed to the object in the container without having to be written to\n disk first.\n \"\"\"\n # First make sure that there is a content source.\n if (data, file_or_path) == (None, None):\n raise exc.NoContentSpecified(\"You must specify either a file path, \"\n \"an open file-like object, or a stream of bytes when \"\n \"creating an object.\")\n src = data if data else file_or_path\n if src is file_or_path:\n obj_name = _validate_file_or_path(file_or_path, obj_name)\n if not obj_name:\n raise exc.MissingName(\"No name for the object to be created has \"\n \"been specified, and none can be inferred from context\")\n if chunk_size:\n chunked = True\n if chunked:\n chunk_size = chunk_size or DEFAULT_CHUNKSIZE\n headers = headers or {}\n if metadata:\n metadata = _massage_metakeys(metadata, OBJECT_META_PREFIX)\n headers = metadata\n if ttl is not None:\n headers[\"X-Delete-After\"] = ttl\n if src is data:\n self._upload(obj_name, data, content_type,\n content_encoding, content_length, etag, chunked,\n chunk_size, headers)\n elif hasattr(file_or_path, \"read\"):\n self._upload(obj_name, file_or_path, content_type,\n content_encoding, content_length, etag, False,\n chunk_size, headers)\n else:\n # Need to wrap the call in a context manager\n with open(file_or_path, \"rb\") as ff:\n self._upload(obj_name, ff, content_type,\n content_encoding, 
content_length, etag, False,\n chunk_size, headers)\n if return_none:\n return\n return self.get(obj_name)", "response": "Creates or replaces a storage object in this container."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nupload the object to the object store.", "response": "def _upload(self, obj_name, content, content_type, content_encoding,\n content_length, etag, chunked, chunk_size, headers):\n \"\"\"\n Handles the uploading of content, including working around the 5GB\n maximum file size.\n \"\"\"\n if content_type is not None:\n headers[\"Content-Type\"] = content_type\n if content_encoding is not None:\n headers[\"Content-Encoding\"] = content_encoding\n if isinstance(content, six.string_types):\n fsize = len(content)\n else:\n if chunked:\n fsize = None\n elif content_length is None:\n fsize = get_file_size(content)\n else:\n fsize = content_length\n if fsize is None or fsize <= MAX_FILE_SIZE:\n # We can just upload it as-is.\n return self._store_object(obj_name, content=content, etag=etag,\n chunked=chunked, chunk_size=chunk_size, headers=headers)\n # Files larger than MAX_FILE_SIZE must be segmented\n # and uploaded separately.\n num_segments = int(math.ceil(float(fsize) / MAX_FILE_SIZE))\n digits = int(math.log10(num_segments)) + 1\n # NOTE: This could be greatly improved with threading or other\n # async design.\n for segment in range(num_segments):\n sequence = str(segment + 1).zfill(digits)\n seg_name = \"%s.%s\" % (obj_name, sequence)\n with utils.SelfDeletingTempfile() as tmpname:\n with open(tmpname, \"wb\") as tmp:\n tmp.write(content.read(MAX_FILE_SIZE))\n with open(tmpname, \"rb\") as tmp:\n # We have to calculate the etag for each segment\n etag = utils.get_checksum(tmp)\n self._store_object(seg_name, content=tmp, etag=etag,\n chunked=False, headers=headers)\n # Upload the manifest\n headers.pop(\"ETag\", \"\")\n headers[\"X-Object-Manifest\"] = \"%s/%s.\" % (self.name, obj_name)\n 
self._store_object(obj_name, content=None, headers=headers)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nstores the contents of an object in the object store.", "response": "def _store_object(self, obj_name, content, etag=None, chunked=False,\n chunk_size=None, headers=None):\n \"\"\"\n Handles the low-level creation of a storage object and the uploading of\n the contents of that object.\n \"\"\"\n head_etag = headers.pop(\"ETag\", \"\")\n if chunked:\n headers.pop(\"Content-Length\", \"\")\n headers[\"Transfer-Encoding\"] = \"chunked\"\n elif etag is None and content is not None:\n etag = utils.get_checksum(content)\n if etag:\n headers[\"ETag\"] = etag\n if not headers.get(\"Content-Type\"):\n headers[\"Content-Type\"] = None\n uri = \"/%s/%s\" % (self.uri_base, obj_name)\n resp, resp_body = self.api.method_put(uri, data=content,\n headers=headers)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef fetch(self, obj, include_meta=False, chunk_size=None, size=None,\n extra_info=None):\n \"\"\"\n Fetches the object from storage.\n\n If 'include_meta' is False, only the bytes representing the\n stored object are returned.\n\n Note: if 'chunk_size' is defined, the 'include_meta' parameter is\n ignored.\n\n If 'size' is specified, only the first 'size' bytes of the object will\n be returned. If the object if smaller than 'size', the entire object is\n returned.\n\n When 'include_meta' is True, what is returned from this method is a\n 2-tuple:\n Element 0: a dictionary containing metadata about the file.\n Element 1: a stream of bytes representing the object's contents.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n uri = \"/%s/%s\" % (self.uri_base, utils.get_name(obj))\n if chunk_size:\n # Need the total size of the object\n if not isinstance(obj, StorageObject):\n obj = self.get(obj)\n obj_size = obj.total_bytes\n return self._fetch_chunker(uri, chunk_size, size, obj_size)\n headers = {}\n if size:\n headers = {\"Range\": \"bytes=0-%s\" % size}\n resp, resp_body = self.api.method_get(uri, headers=headers,\n raw_content=True)\n if include_meta:\n meta_resp, meta_body = self.api.method_head(uri)\n return (meta_resp.headers, resp_body)\n return resp_body", "response": "Fetches the object from storage."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _fetch_chunker(self, uri, chunk_size, size, obj_size):\n pos = 0\n total_bytes = 0\n size = size or obj_size\n max_size = min(size, obj_size)\n while True:\n endpos = min(obj_size, pos + chunk_size - 1)\n headers = {\"Range\": \"bytes=%s-%s\" % (pos, endpos)}\n resp, resp_body = self.api.method_get(uri, headers=headers,\n raw_content=True)\n pos = endpos + 1\n if not resp_body:\n # End of file\n return\n yield resp_body\n total_bytes += len(resp_body)\n if total_bytes >= max_size:\n return", "response": "Returns a generator that returns an object in chunks."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ndelete all objects in this container.", "response": "def delete_all_objects(self, nms, async_=False):\n \"\"\"\n Deletes all objects from this container.\n\n By default the call will block until all objects have been deleted. By\n passing True for the 'async_' parameter, this method will not block, and\n instead return an object that can be used to follow the progress of the\n deletion. 
When deletion is complete the bulk deletion object's\n 'results' attribute will be populated with the information returned\n from the API call. In synchronous mode this is the value that is\n returned when the call completes. It is a dictionary with the following\n keys:\n\n deleted - the number of objects deleted\n not_found - the number of objects not found\n status - the HTTP return status code. '200 OK' indicates success\n errors - a list of any errors returned by the bulk delete call\n \"\"\"\n if nms is None:\n nms = self.api.list_object_names(self.name, full_listing=True)\n return self.api.bulk_delete(self.name, nms, async_=async_)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef download(self, obj, directory, structure=True):\n if not os.path.isdir(directory):\n raise exc.FolderNotFound(\"The directory '%s' does not exist.\" %\n directory)\n obj_name = utils.get_name(obj)\n path, fname = os.path.split(obj_name)\n if structure:\n fullpath = os.path.join(directory, path)\n if not os.path.exists(fullpath):\n os.makedirs(fullpath)\n target = os.path.join(fullpath, fname)\n else:\n target = os.path.join(directory, fname)\n with open(target, \"wb\") as dl:\n content = self.fetch(obj)\n try:\n dl.write(content)\n except UnicodeEncodeError:\n encoding = pyrax.get_encoding()\n dl.write(content.encode(encoding))", "response": "Downloads the object from storage and writes it to the specified directory."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nremoving an object from public access before the TTL expires.", "response": "def purge(self, obj, email_addresses=None):\n \"\"\"\n Removes a CDN-enabled object from public access before the TTL expires.\n Please note that there is a limit (at this time) of 25 such requests;\n if you need to purge more than that, you must contact support.\n\n If one or more email_addresses are included, an email confirming the\n purge is sent to each 
address.\n \"\"\"\n cname = utils.get_name(self.container)\n oname = utils.get_name(obj)\n headers = {}\n if email_addresses:\n email_addresses = utils.coerce_to_list(email_addresses)\n headers[\"X-Purge-Email\"] = \", \".join(email_addresses)\n uri = \"/%s/%s\" % (cname, oname)\n resp, resp_body = self.api.cdn_request(uri, method=\"DELETE\",\n headers=headers)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the metadata for the specified object as a dict.", "response": "def get_metadata(self, obj, prefix=None):\n \"\"\"\n Returns the metadata for the specified object as a dict.\n \"\"\"\n uri = \"/%s/%s\" % (self.uri_base, utils.get_name(obj))\n resp, resp_body = self.api.method_head(uri)\n ret = {}\n # Add the metadata prefix, if needed.\n if prefix is None:\n prefix = OBJECT_META_PREFIX\n low_prefix = prefix.lower()\n for hkey, hval in list(resp.headers.items()):\n lowkey = hkey.lower()\n if lowkey.startswith(low_prefix):\n cleaned = hkey.replace(low_prefix, \"\").replace(\"-\", \"_\")\n ret[cleaned] = hval\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef set_metadata(self, obj, metadata, clear=False, prefix=None):\n # Add the metadata prefix, if needed.\n if prefix is None:\n prefix = OBJECT_META_PREFIX\n massaged = _massage_metakeys(metadata, prefix)\n cname = utils.get_name(self.container)\n oname = utils.get_name(obj)\n new_meta = {}\n # Note that the API for object POST is the opposite of that for\n # container POST: for objects, all current metadata is deleted,\n # whereas for containers you need to set the values to an empty\n # string to delete them.\n if not clear:\n obj_meta = self.get_metadata(obj, prefix=prefix)\n new_meta = _massage_metakeys(obj_meta, prefix)\n utils.case_insensitive_update(new_meta, massaged)\n # Remove any empty values, since the object metadata API will\n # store them.\n to_pop = []\n for key, val in 
six.iteritems(new_meta):\n if not val:\n to_pop.append(key)\n for key in to_pop:\n new_meta.pop(key)\n uri = \"/%s/%s\" % (cname, oname)\n resp, resp_body = self.api.method_post(uri, headers=new_meta)", "response": "Sets the metadata of the specified object."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef remove_metadata_key(self, obj, key):\n meta_dict = {key: \"\"}\n return self.set_metadata(obj, meta_dict)", "response": "Removes the specified key from the object s metadata."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _configure_cdn(self):\n ident = self.identity\n cdn_svc = ident.services.get(\"object_cdn\")\n if cdn_svc:\n ep = cdn_svc.endpoints.get(self.region_name)\n if ep:\n self.cdn_management_url = ep.public_url", "response": "Initialize CDN - related endpoints if available."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _backwards_aliases(self):\n self.list_containers = self.list_container_names\n self.get_all_containers = self.list\n self.get_container = self.get\n self.create_container = self.create\n self.delete_container = self.delete\n self.get_container_objects = self.list_container_objects\n self.get_container_object_names = self.list_container_object_names\n self.get_info = self.get_account_info", "response": "In order to keep this backwards - compatible with previous versions of the old names to the new names to the new names."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get(self, item):\n if isinstance(item, six.string_types):\n item = super(StorageClient, self).get(item)\n return item", "response": "Returns the container whose name is provided as item."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a 
manager to handle interacting with Containers.", "response": "def _configure_manager(self):\n \"\"\"\n Creates a manager to handle interacting with Containers.\n \"\"\"\n self._manager = ContainerManager(self, resource_class=Container,\n response_key=\"\", uri_base=\"\")"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a dictionary containing information about the account.", "response": "def get_account_details(self):\n \"\"\"\n Returns a dictionary containing information about the account.\n \"\"\"\n headers = self._manager.get_account_headers()\n acct_prefix = \"x-account-\"\n meta_prefix = ACCOUNT_META_PREFIX.lower()\n ret = {}\n for hkey, hval in list(headers.items()):\n lowkey = hkey.lower()\n if lowkey.startswith(acct_prefix):\n if not lowkey.startswith(meta_prefix):\n cleaned = hkey.replace(acct_prefix, \"\").replace(\"-\", \"_\")\n try:\n # Most values are ints\n ret[cleaned] = int(hval)\n except ValueError:\n ret[cleaned] = hval\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a tuple for the number of containers and total bytes in the current account.", "response": "def get_account_info(self):\n \"\"\"\n Returns a tuple for the number of containers and total bytes in the\n account.\n \"\"\"\n headers = self._manager.get_account_headers()\n return (headers.get(\"x-account-container-count\"),\n headers.get(\"x-account-bytes-used\"))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting the metadata for the given account.", "response": "def set_account_metadata(self, metadata, clear=False, prefix=None,\n extra_info=None):\n \"\"\"\n Accepts a dictionary of metadata key/value pairs and updates the\n account's metadata with them.\n\n If 'clear' is True, any existing metadata is deleted and only the\n passed metadata is retained. 
Otherwise, the values passed here update\n the account's metadata.\n\n By default, the standard account metadata prefix ('X-Account-Meta-') is\n prepended to the header name if it isn't present. For non-standard\n headers, you must include a non-None prefix, such as an empty string.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self._manager.set_account_metadata(metadata, clear=clear,\n prefix=prefix)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_temp_url_key(self, cached=True):\n meta = self._cached_temp_url_key\n if not cached or not meta:\n key = \"temp_url_key\"\n meta = self.get_account_metadata().get(key)\n self._cached_temp_url_key = meta\n return meta", "response": "Returns the current TempURL key or None if it has not been set."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef set_temp_url_key(self, key=None):\n if key is None:\n key = uuid.uuid4().hex\n meta = {\"Temp-Url-Key\": key}\n self.set_account_metadata(meta)\n self._cached_temp_url_key = key", "response": "Sets the key for the Temporary URL for the account."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_temp_url(self, container, obj, seconds, method=\"GET\", key=None,\n cached=True):\n \"\"\"\n Given a storage object in a container, returns a URL that can be used\n to access that object. The URL will expire after `seconds` seconds.\n\n The only methods supported are GET and PUT. Anything else will raise\n an `InvalidTemporaryURLMethod` exception.\n\n If you have your Temporary URL key, you can pass it in directly and\n potentially save an API call to retrieve it. 
If you don't pass in the\n key, and don't wish to use any cached value, pass `cached=False`.\n \"\"\"\n return self._manager.get_temp_url(container, obj, seconds,\n method=method, key=key, cached=cached)", "response": "Given a storage object in a container returns a temporary URL that can be used to access that object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list(self, limit=None, marker=None, end_marker=None, prefix=None):\n return self._manager.list(limit=limit, marker=marker,\n end_marker=end_marker, prefix=prefix)", "response": "List the containers in this account using the parameters to control\n pagination"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef make_container_public(self, container, ttl=None):\n return self._manager.make_public(container, ttl=ttl)", "response": "Enables CDN access for the specified container."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a list of info on Containers.", "response": "def list_containers_info(self, limit=None, marker=None):\n \"\"\"Returns a list of info on Containers.\n\n For each container, a dict containing the following keys is returned:\n \\code\n name - the name of the container\n count - the number of objects in the container\n bytes - the total bytes in the container\n \"\"\"\n return self._manager.list_containers_info(limit=limit, marker=marker)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nlisting all of the directories in a container.", "response": "def list_container_subdirs(self, container, limit=None, marker=None,\n prefix=None, delimiter=None, full_listing=False):\n \"\"\"\n Although you cannot nest directories, you can simulate a hierarchical\n structure within a single container by adding forward slash characters\n (/) in the object name. 
This method returns a list of all of these\n pseudo-subdirectories in the specified container.\n \"\"\"\n return self._manager.list_subdirs(container, limit=limit,\n marker=marker, prefix=prefix, delimiter=delimiter,\n full_listing=full_listing)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef list_container_object_names(self, container, limit=None, marker=None,\n prefix=None, delimiter=None, full_listing=False):\n \"\"\"\n Returns the names of all the objects in the specified container,\n optionally limited by the pagination parameters.\n \"\"\"\n return self._manager.list_object_names(container, marker=marker,\n limit=limit, prefix=prefix, delimiter=delimiter,\n full_listing=full_listing)", "response": "Returns the names of all the objects in the specified container optionally limited by the pagination parameters."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn a dictionary containing the metadata for the container.", "response": "def get_container_metadata(self, container, prefix=None):\n \"\"\"\n Returns a dictionary containing the metadata for the container.\n \"\"\"\n return self._manager.get_metadata(container, prefix=prefix)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nset the metadata for a container.", "response": "def set_container_metadata(self, container, metadata, clear=False,\n prefix=None):\n \"\"\"\n Accepts a dictionary of metadata key/value pairs and updates the\n specified container metadata with them.\n\n If 'clear' is True, any existing metadata is deleted and only the\n passed metadata is retained. Otherwise, the values passed here update\n the container's metadata.\n\n By default, the standard container metadata prefix\n ('X-Container-Meta-') is prepended to the header name if it isn't\n present. 
For non-standard headers, you must include a non-None prefix,\n such as an empty string.\n \"\"\"\n return self._manager.set_metadata(container, metadata, clear=clear,\n prefix=prefix)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nremove all of the metadata of a container.", "response": "def delete_container_metadata(self, container, prefix=None):\n \"\"\"\n Removes all of thethe container's metadata.\n\n By default, all metadata beginning with the standard container metadata\n prefix ('X-Container-Meta-') is removed. If you wish to remove all\n metadata beginning with a different prefix, you must specify that\n prefix.\n \"\"\"\n return self._manager.delete_metadata(container, prefix=prefix)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_object_metadata(self, container, obj, prefix=None):\n return self._manager.get_object_metadata(container, obj, prefix=prefix)", "response": "Returns the metadata for the specified object as a dict."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nremoves the specified key from the storage object s metadata.", "response": "def remove_object_metadata_key(self, container, obj, key, prefix=None):\n \"\"\"\n Removes the specified key from the storage object's metadata. If the key\n does not exist in the metadata, nothing is done.\n \"\"\"\n self.set_object_metadata(container, obj, {key: \"\"}, prefix=prefix)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef list_container_objects(self, container, limit=None, marker=None,\n prefix=None, delimiter=None, end_marker=None, full_listing=False):\n \"\"\"\n Return a list of StorageObjects representing the objects in the\n container. 
You can use the marker, end_marker, and limit params to\n handle pagination, and the prefix and delimiter params to filter the\n objects returned. Also, by default only the first 10,000 objects are\n returned; if you set full_listing to True, an iterator to return all\n the objects in the container is returned. In this case, only the\n 'prefix' parameter is used; if you specify any others, they are\n ignored.\n \"\"\"\n if full_listing:\n return self._manager.object_listing_iterator(container,\n prefix=prefix)\n return self._manager.list_objects(container, limit=limit,\n marker=marker, prefix=prefix, delimiter=delimiter,\n end_marker=end_marker)", "response": "List the objects in a container."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndelete the specified object in the specified container in the specified number of seconds.", "response": "def delete_object_in_seconds(self, cont, obj, seconds, extra_info=None):\n \"\"\"\n Sets the object in the specified container to be deleted after the\n specified number of seconds.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self._manager.delete_object_in_seconds(cont, obj, seconds)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nstoring an object in a container.", "response": "def store_object(self, container, obj_name, data, content_type=None,\n etag=None, content_encoding=None, ttl=None, return_none=False,\n chunk_size=None, headers=None, metadata=None, extra_info=None):\n \"\"\"\n Creates a new object in the specified container, and populates it with\n the given data. A StorageObject reference to the uploaded file\n will be returned, unless 'return_none' is set to True.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self.create_object(container, obj_name=obj_name, data=data,\n content_type=content_type, etag=etag,\n content_encoding=content_encoding, ttl=ttl,\n return_none=return_none, chunk_size=chunk_size,\n headers=headers, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nuploading a file to a container.", "response": "def upload_file(self, container, file_or_path, obj_name=None,\n content_type=None, etag=None, content_encoding=None, ttl=None,\n content_length=None, return_none=False, headers=None,\n metadata=None, extra_info=None):\n \"\"\"\n Uploads the specified file to the container. If no name is supplied,\n the file's name will be used. Either a file path or an open file-like\n object may be supplied. A StorageObject reference to the uploaded file\n will be returned, unless 'return_none' is set to True.\n\n You may optionally set the `content_type` and `content_encoding`\n parameters; pyrax will create the appropriate headers when the object\n is stored.\n\n If the size of the file is known, it can be passed as `content_length`.\n\n If you wish for the object to be temporary, specify the time it should\n be stored in seconds in the `ttl` parameter. If this is specified, the\n object will be deleted after that number of seconds.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self.create_object(container, file_or_path=file_or_path,\n obj_name=obj_name, content_type=content_type, etag=etag,\n content_encoding=content_encoding, ttl=ttl, headers=headers,\n metadata=metadata, return_none=return_none)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef fetch_object(self, container, obj, include_meta=False,\n chunk_size=None, size=None, extra_info=None):\n \"\"\"\n Fetches the object from storage.\n\n If 'include_meta' is False, only the bytes representing the\n stored object are returned.\n\n Note: if 'chunk_size' is defined, you must fully read the object's\n contents before making another request.\n\n If 'size' is specified, only the first 'size' bytes of the object will\n be returned. If the object if smaller than 'size', the entire object is\n returned.\n\n When 'include_meta' is True, what is returned from this method is a\n 2-tuple:\n Element 0: a dictionary containing metadata about the file.\n Element 1: a stream of bytes representing the object's contents.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self._manager.fetch_object(container, obj,\n include_meta=include_meta, chunk_size=chunk_size, size=size)", "response": "Fetches the object from storage."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef fetch_partial(self, container, obj, size):\n return self._manager.fetch_partial(container, obj, size)", "response": "Fetch a partial version of the object."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef fetch_dlo(self, container, name, chunk_size=None, verbose=False):\n if chunk_size is None:\n chunk_size = DEFAULT_CHUNKSIZE\n\n class FetchChunker(object):\n \"\"\"\n Class that takes the generator objects returned by a chunked\n fetch_object() call and wraps them to behave as file-like objects\n for uploading.\n \"\"\"\n def __init__(self, gen, verbose=False):\n self.gen = gen\n self.verbose = verbose\n self.processed = 0\n self.count = 0\n self.interval = 100\n\n def read(self, size=None):\n self.count += 1\n if self.verbose:\n if self.count > self.interval:\n self.count = 0\n print(\".\")\n ret = next(self.gen)\n self.processed += len(ret)\n return ret\n\n parts = self.get_container_objects(container, prefix=name)\n fetches = [(part.name, self.fetch_object(container, part.name,\n chunk_size=chunk_size))\n for part in parts\n if part.name != name]\n job = [(fetch[0], FetchChunker(fetch[1], verbose=verbose))\n for fetch in fetches]\n return job", "response": "Fetch a single - part DLO from a container."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef download_object(self, container, obj, directory, structure=True):\n return self._manager.download_object(container, obj, directory,\n structure=structure)", "response": "Downloads the specified object from storage and 
writes it to the specified directory."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete(self, container, del_objects=False):\n return self._manager.delete(container, del_objects=del_objects)", "response": "Deletes the specified container."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef copy_object(self, container, obj, new_container, new_obj_name=None,\n content_type=None, extra_info=None):\n \"\"\"\n Copies the object to the new container, optionally giving it a new name.\n If you copy to the same container, you must supply a different name.\n\n You can optionally change the content_type of the object by supplying\n that in the 'content_type' parameter.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self._manager.copy_object(container, obj, new_container,\n new_obj_name=new_obj_name, content_type=content_type)", "response": "Copies the object to a new container optionally giving it a new name."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef move_object(self, container, obj, new_container, new_obj_name=None,\n new_reference=False, content_type=None, extra_info=None):\n \"\"\"\n Works just like copy_object, except that the source object is deleted\n after a successful copy.\n\n You can optionally change the content_type of the object by supplying\n that in the 'content_type' parameter.\n\n NOTE: any references to the original object will no longer be valid;\n you will have to get a reference to the new object by passing True for\n the 'new_reference' parameter. When this is True, a reference to the\n newly moved object is returned. 
Otherwise, the etag for the moved\n object is returned.\n\n The 'extra_info' parameter is included for backwards compatibility. It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self._manager.move_object(container, obj, new_container,\n new_obj_name=new_obj_name, new_reference=new_reference,\n content_type=content_type)", "response": "Moves an object from one container to another."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncopying an object to itself but applies a new content - type.", "response": "def change_object_content_type(self, container, obj, new_ctype,\n guess=False, extra_info=None):\n \"\"\"\n Copies object to itself, but applies a new content-type. The guess\n feature requires the container to be CDN-enabled. If not then the\n content-type must be supplied. If using guess with a CDN-enabled\n container, new_ctype can be set to None. Failure during the put will\n result in a swift exception.\n\n The 'extra_info' parameter is included for backwards compatibility. 
It\n is no longer used at all, and will not be modified with swiftclient\n info, since swiftclient is not used any more.\n \"\"\"\n return self._manager.change_object_content_type(container, obj,\n new_ctype, guess=guess)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _upload_folder_in_background(self, folder_path, container, ignore,\n upload_key, ttl=None):\n \"\"\"Runs the folder upload in the background.\"\"\"\n uploader = FolderUploader(folder_path, container, ignore, upload_key,\n self, ttl=ttl)\n uploader.start()", "response": "Runs the folder upload in the background."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef sync_folder_to_container(self, folder_path, container, delete=False,\n include_hidden=False, ignore=None, ignore_timestamps=False,\n object_prefix=\"\", verbose=False):\n \"\"\"\n Compares the contents of the specified folder, and checks to make sure\n that the corresponding object is present in the specified container. If\n there is no remote object matching the local file, it is created. If a\n matching object exists, the etag is examined to determine if the object\n in the container matches the local file; if they differ, the container\n is updated with the local file if the local file is newer when\n `ignore_timestamps' is False (default). If `ignore_timestamps` is True,\n the object is overwritten with the local file contents whenever the\n etags differ. NOTE: the timestamp of a remote object is the time it was\n uploaded, not the original modification time of the file stored in that\n object. 
Unless 'include_hidden' is True, files beginning with an\n initial period are ignored.\n\n If the 'delete' option is True, any objects in the container that do\n not have corresponding files in the local folder are deleted.\n\n You can selectively ignore files by passing either a single pattern or\n a list of patterns; these will be applied to the individual folder and\n file names, and any names that match any of the 'ignore' patterns will\n not be uploaded. The patterns should be standard *nix-style shell\n patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as\n 'program.pyc' and 'abcpyc'.\n\n If `object_prefix` is set it will be appended to the object name when\n it is checked and uploaded to the container. For example, if you use\n sync_folder_to_container(\"folderToSync/\", myContainer,\n object_prefix=\"imgFolder\") it will upload the files to the\n container/imgFolder/... instead of just container/...\n\n Set `verbose` to True to make it print what is going on. It will\n show which files are being uploaded and which ones are not and why.\n \"\"\"\n cont = self.get_container(container)\n self._local_files = []\n # Load a list of all the remote objects so we don't have to keep\n # hitting the service\n if verbose:\n log = logging.getLogger(\"pyrax\")\n log.info(\"Loading remote object list (prefix=%s)\", object_prefix)\n data = cont.get_objects(prefix=object_prefix, full_listing=True)\n self._remote_files = dict((d.name, d) for d in data)\n self._sync_summary = {\"total\": 0,\n \"uploaded\": 0,\n \"ignored\": 0,\n \"older\": 0,\n \"duplicate\": 0,\n \"failed\": 0,\n \"failure_reasons\": [],\n \"deleted\": 0,\n }\n self._sync_folder_to_container(folder_path, cont, prefix=\"\",\n delete=delete, include_hidden=include_hidden, ignore=ignore,\n ignore_timestamps=ignore_timestamps,\n object_prefix=object_prefix, verbose=verbose)\n # Unset the _remote_files\n self._remote_files = None\n if verbose:\n # Log the summary\n summary = self._sync_summary\n 
log.info(\"Folder sync completed at %s\" % time.ctime())\n log.info(\" Total files processed: %s\" % summary[\"total\"])\n log.info(\" Number Uploaded: %s\" % summary[\"uploaded\"])\n log.info(\" Number Ignored: %s\" % summary[\"ignored\"])\n log.info(\" Number Skipped (older): %s\" % summary[\"older\"])\n log.info(\" Number Skipped (dupe): %s\" % summary[\"duplicate\"])\n log.info(\" Number Deleted: %s\" % summary[\"deleted\"])\n log.info(\" Number Failed: %s\" % summary[\"failed\"])\n if summary[\"failed\"]:\n for reason in summary[\"failure_reasons\"]:\n log.info(\" Reason: %s\" % reason)", "response": "Syncs the contents of the specified folder to the specified container."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes all the objects that are not present in the specified container.", "response": "def _delete_objects_not_in_list(self, cont, object_prefix=\"\"):\n \"\"\"\n Finds all the objects in the specified container that are not present\n in the self._local_files list, and deletes them.\n \"\"\"\n objnames = set(cont.get_object_names(prefix=object_prefix,\n full_listing=True))\n localnames = set(self._local_files)\n to_delete = list(objnames.difference(localnames))\n self._sync_summary[\"deleted\"] += len(to_delete)\n # We don't need to wait around for this to complete. Store the thread\n # reference in case it is needed at some point.\n self._thread = self.bulk_delete(cont, to_delete, async_=True)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nbulking deletes multiple objects from a container in a single call.", "response": "def bulk_delete(self, container, object_names, async_=False):\n \"\"\"\n Deletes multiple objects from a container in a single call.\n\n The bulk deletion call does not return until all of the specified\n objects have been processed. 
For large numbers of objects, this can\n take quite a while, so there is an 'async_' parameter to give you the\n option to have this call return immediately. If 'async_' is True, an\n object is returned with a 'completed' attribute that will be set to\n True as soon as the bulk deletion is complete, and a 'results'\n attribute that will contain a dictionary (described below) with the\n results of the bulk deletion.\n\n When deletion is complete the bulk deletion object's 'results'\n attribute will be populated with the information returned from the API\n call. In synchronous mode this is the value that is returned when the\n call completes. It is a dictionary with the following keys:\n\n deleted - the number of objects deleted\n not_found - the number of objects not found\n status - the HTTP return status code. '200 OK' indicates success\n errors - a list of any errors returned by the bulk delete call\n\n This isn't available in swiftclient yet, so it's using code patterned\n after the client code in that library.\n \"\"\"\n deleter = BulkDeleter(self, container, object_names)\n deleter.start()\n if async_:\n return deleter\n while not deleter.completed:\n time.sleep(self.bulk_delete_interval)\n return deleter.results"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef upload_files_in_folder(self, dirname, fnames):\n if utils.match_pattern(dirname, self.ignore):\n return False\n good_names = (nm for nm in fnames\n if not utils.match_pattern(nm, self.ignore))\n for fname in good_names:\n if self.client._should_abort_folder_upload(self.upload_key):\n return\n full_path = os.path.join(dirname, fname)\n obj_name = os.path.relpath(full_path, self.root_folder)\n obj_size = os.stat(full_path).st_size\n self.client.upload_file(self.container, full_path,\n obj_name=obj_name, return_none=True, ttl=self.ttl)\n self.client._update_progress(self.upload_key, obj_size)", "response": "Handles the iteration across files 
within a folder."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nstarts the uploading thread.", "response": "def run(self):\n \"\"\"Starts the uploading thread.\"\"\"\n root_path, folder_name = os.path.split(self.root_folder)\n self.root_folder = os.path.join(root_path, folder_name)\n for dirname, _, fnames in os.walk(self.root_folder):\n self.upload_files_in_folder(dirname, fnames)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ngiving an iterable enumerate its contents for a user to choose from.", "response": "def option_chooser(options, attr=None):\n \"\"\"Given an iterable, enumerate its contents for a user to choose from.\n If the optional `attr` is not None, that attribute in each iterated\n object will be printed.\n\n This function will exit the program if the user chooses the escape option.\n \"\"\"\n for num, option in enumerate(options):\n if attr:\n print(\"%s: %s\" % (num, getattr(option, attr)))\n else:\n print(\"%s: %s\" % (num, option))\n # Add an escape option\n escape_opt = num + 1\n print(\"%s: I want to exit!\" % escape_opt)\n choice = six.moves.input(\"Selection: \")\n try:\n ichoice = int(choice)\n if ichoice > escape_opt:\n raise ValueError\n except ValueError:\n print(\"Valid entries are the numbers 0-%s. 
Received '%s'.\" % (escape_opt,\n choice))\n sys.exit()\n\n if ichoice == escape_opt:\n print(\"Bye!\")\n sys.exit()\n\n return ichoice"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nassures that a queue ID or name is passed as the queue parameter.", "response": "def assure_queue(fnc):\n \"\"\"\n Converts a queue ID or name passed as the 'queue' parameter to a Queue\n object.\n \"\"\"\n @wraps(fnc)\n def _wrapped(self, queue, *args, **kwargs):\n if not isinstance(queue, Queue):\n # Must be the ID\n queue = self._manager.get(queue)\n return fnc(self, queue, *args, **kwargs)\n return _wrapped"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ndeleting the message with the supplied ID from the message manager.", "response": "def delete_message(self, msg_id, claim_id=None):\n \"\"\"\n Deletes the message whose ID matches the supplied msg_id from the\n specified queue. If the message has been claimed, the ID of that claim\n must be passed as the 'claim_id' parameter.\n \"\"\"\n return self._message_manager.delete(msg_id, claim_id=claim_id)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef list(self, include_claimed=False, echo=False, marker=None, limit=None):\n return self._message_manager.list(include_claimed=include_claimed,\n echo=echo, marker=marker, limit=limit)", "response": "Returns a list of all messages for this queue."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_by_claim(self, claim):\n if not isinstance(claim, QueueClaim):\n claim = self._claim_manager.get(claim)\n return claim.messages", "response": "Returns a list of all the messages that have been claimed by the specified claim."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a new QueueClaim object that contains the messages that have been claimed for the 
given time period and the count unclaimed messages.", "response": "def claim_messages(self, ttl, grace, count=None):\n \"\"\"\n Claims up to `count` unclaimed messages from this queue. If count is\n not specified, the default is to claim 10 messages.\n\n The `ttl` parameter specifies how long the server should wait before\n releasing the claim. The ttl value MUST be between 60 and 43200 seconds.\n\n The `grace` parameter is the message grace period in seconds. The value\n of grace MUST be between 60 and 43200 seconds. The server extends the\n lifetime of claimed messages to be at least as long as the lifetime of\n the claim itself, plus a specified grace period to deal with crashed\n workers (up to 1209600 or 14 days including claim lifetime). If a\n claimed message would normally live longer than the grace period, its\n expiration will not be adjusted.\n\n Returns a QueueClaim object, whose 'messages' attribute contains the\n list of QueueMessage objects representing the claimed messages.\n \"\"\"\n return self._claim_manager.claim(ttl, grace, count=count)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the specified claim with either a new TTL or a new grace period.", "response": "def update_claim(self, claim, ttl=None, grace=None):\n \"\"\"\n Updates the specified claim with either a new TTL or grace period, or\n both.\n \"\"\"\n return self._claim_manager.update(claim, ttl=ttl, grace=grace)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _add_details(self, info):\n super(QueueMessage, self)._add_details(info)\n if self.href is None:\n return\n parsed = urllib.parse.urlparse(self.href)\n self.id = parsed.path.rsplit(\"/\", 1)[-1]\n query = parsed.query\n if query:\n self.claim_id = query.split(\"claim_id=\")[-1]", "response": "Add details to the object."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nadds details to the object.", 
"response": "def _add_details(self, info):\n \"\"\"\n The 'id' attribute is not supplied directly, but included as part of\n the 'href' value. Also, convert the dicts for messages into\n QueueMessage objects.\n \"\"\"\n msg_dicts = info.pop(\"messages\", [])\n super(QueueClaim, self)._add_details(info)\n parsed = urllib.parse.urlparse(self.href)\n self.id = parsed.path.rsplit(\"/\", 1)[-1]\n self.messages = [QueueMessage(self.manager._message_manager, item)\n for item in msg_dicts]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef list(self, include_claimed=False, echo=False, marker=None, limit=None):\n return self._iterate_list(include_claimed=include_claimed, echo=echo,\n marker=marker, limit=limit)", "response": "List the available resource names."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ndeletes the specified message from its queue.", "response": "def delete(self, msg, claim_id=None):\n \"\"\"\n Deletes the specified message from its queue. 
If the message has been\n claimed, the ID of that claim must be passed as the 'claim_id'\n parameter.\n \"\"\"\n msg_id = utils.get_id(msg)\n if claim_id:\n uri = \"/%s/%s?claim_id=%s\" % (self.uri_base, msg_id, claim_id)\n else:\n uri = \"/%s/%s\" % (self.uri_base, msg_id)\n return self._delete(uri)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef list_by_ids(self, ids):\n ids = utils.coerce_to_list(ids)\n uri = \"/%s?ids=%s\" % (self.uri_base, \",\".join(ids))\n # The API is not consistent in how it returns message lists, so this\n # workaround is needed.\n curr_prkey = self.plural_response_key\n self.plural_response_key = \"\"\n # BROKEN: API returns a list, not a dict.\n ret = self._list(uri)\n self.plural_response_key = curr_prkey\n return ret", "response": "This method returns a list of all the messages in this queue that match the given IDs."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ndelete the messages whose IDs are passed in from this queue.", "response": "def delete_by_ids(self, ids):\n \"\"\"\n Deletes the messages whose IDs are passed in from this queue.\n \"\"\"\n ids = utils.coerce_to_list(ids)\n uri = \"/%s?ids=%s\" % (self.uri_base, \",\".join(ids))\n return self.api.method_delete(uri)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nclaiming up to `count` unclaimed messages from this queue. If count is not specified, the default is to claim 10 messages. The `ttl` parameter specifies how long the server should wait before releasing the claim. The ttl value MUST be between 60 and 43200 seconds. The `grace` parameter is the message grace period in seconds. The value of grace MUST be between 60 and 43200 seconds. 
The server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus a specified grace period to deal with crashed workers (up to 1209600 or 14 days including claim lifetime). If a claimed message would normally live longer than the grace period, its expiration will not be adjusted. Returns a QueueClaim object, whose 'messages' attribute contains the list of QueueMessage objects representing the claimed messages.", "response": "def claim(self, ttl, grace, count=None):\n \"\"\"\n Claims up to `count` unclaimed messages from this queue. If count is\n not specified, the default is to claim 10 messages.\n\n The `ttl` parameter specifies how long the server should wait before\n releasing the claim. The ttl value MUST be between 60 and 43200 seconds.\n\n The `grace` parameter is the message grace period in seconds. The value\n of grace MUST be between 60 and 43200 seconds. The server extends the\n lifetime of claimed messages to be at least as long as the lifetime of\n the claim itself, plus a specified grace period to deal with crashed\n workers (up to 1209600 or 14 days including claim lifetime). 
If a\n claimed message would normally live longer than the grace period, its\n expiration will not be adjusted.\n\n Returns a QueueClaim object, whose 'messages' attribute contains the\n list of QueueMessage objects representing the claimed messages.\n \"\"\"\n if count is None:\n qs = \"\"\n else:\n qs = \"?limit=%s\" % count\n uri = \"/%s%s\" % (self.uri_base, qs)\n body = {\"ttl\": ttl,\n \"grace\": grace,\n }\n resp, resp_body = self.api.method_post(uri, body=body)\n if resp.status_code == 204:\n # Nothing available to claim\n return None\n # Get the claim ID from the first message in the list.\n href = resp_body[0][\"href\"]\n claim_id = href.split(\"claim_id=\")[-1]\n return self.get(claim_id)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update(self, claim, ttl=None, grace=None):\n body = {}\n if ttl is not None:\n body[\"ttl\"] = ttl\n if grace is not None:\n body[\"grace\"] = grace\n if not body:\n raise exc.MissingClaimParameters(\"You must supply a value for \"\n \"'ttl' or 'grace' when calling 'update()'\")\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(claim))\n resp, resp_body = self.api.method_patch(uri, body=body)", "response": "Updates the specified resource in the specified resource store."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get(self, id_):\n if self.api.queue_exists(id_):\n return Queue(self, {\"queue\": {\"name\": id_, \"id_\": id_}}, key=\"queue\")\n raise exc.NotFound(\"The queue '%s' does not exist.\" % id_)", "response": "Get a specific queue by ID."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_stats(self, queue):\n uri = \"/%s/%s/stats\" % (self.uri_base, utils.get_id(queue))\n resp, resp_body = self.api.method_get(uri)\n return resp_body.get(\"messages\")", "response": "Returns the message stats for the specified queue."} 
{"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the metadata for the specified queue.", "response": "def get_metadata(self, queue):\n \"\"\"\n Returns the metadata for the specified queue.\n \"\"\"\n uri = \"/%s/%s/metadata\" % (self.uri_base, utils.get_id(queue))\n resp, resp_body = self.api.method_get(uri)\n return resp_body"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef set_metadata(self, queue, metadata, clear=False):\n uri = \"/%s/%s/metadata\" % (self.uri_base, utils.get_id(queue))\n if clear:\n curr = {}\n else:\n curr = self.get_metadata(queue)\n curr.update(metadata)\n resp, resp_body = self.api.method_put(uri, body=curr)", "response": "Sets the metadata for the specified queue."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _configure_manager(self):\n self._manager = QueueManager(self,\n resource_class=Queue, response_key=\"queue\",\n uri_base=\"queues\")", "response": "Create the manager to handle queues."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _add_custom_headers(self, dct):\n if self.client_id is None:\n self.client_id = os.environ.get(\"CLOUD_QUEUES_ID\")\n if self.client_id:\n dct[\"Client-ID\"] = self.client_id", "response": "Add the Client - ID header required by Cloud Queues\n "} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _api_request(self, uri, method, **kwargs):\n try:\n return super(QueueClient, self)._api_request(uri, method, **kwargs)\n except exc.BadRequest as e:\n if ((e.code == \"400\") and\n (e.message == 'The \"Client-ID\" header is required.')):\n raise exc.QueueClientIDNotDefined(\"You must supply a client ID \"\n \"to work with Queue messages.\")\n else:\n raise", "response": "This is the base method for handling the 
necessary exceptions."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning True if the named queue exists False otherwise.", "response": "def queue_exists(self, name):\n \"\"\"\n Returns True or False, depending on the existence of the named queue.\n \"\"\"\n try:\n queue = self._manager.head(name)\n return True\n except exc.NotFound:\n return False"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a new queue with the given name.", "response": "def create(self, name):\n \"\"\"\n Cloud Queues works differently, in that they use the name as the ID for\n the resource. So for create(), we need to check if a queue by that name\n exists already, and raise an exception if it does. If not, create the\n queue and return a reference object for it.\n \"\"\"\n if self.queue_exists(name):\n raise exc.DuplicateQueue(\"The queue '%s' already exists.\" % name)\n return self._manager.create(name)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef set_metadata(self, queue, metadata, clear=False):\n return self._manager.set_metadata(queue, metadata, clear=clear)", "response": "Sets the metadata for the specified queue."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef delete_message(self, queue, msg_id, claim_id=None):\n return queue.delete_message(msg_id, claim_id=claim_id)", "response": "Deletes the message with the supplied ID from the specified queue."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list_messages(self, queue, include_claimed=False, echo=False,\n marker=None, limit=None):\n \"\"\"\n Returns a list of messages for the specified queue.\n\n By default only unclaimed messages are returned; if you want claimed\n messages included, pass `include_claimed=True`. 
Also, the requester's\n own messages are not returned by default; if you want them included,\n pass `echo=True`.\n\n The 'marker' and 'limit' parameters are used to control pagination of\n results. 'Marker' is the ID of the last message returned, while 'limit'\n controls the number of messages returned per reuqest (default=20).\n \"\"\"\n return queue.list(include_claimed=include_claimed, echo=echo,\n marker=marker, limit=limit)", "response": "Returns a list of all messages for the specified queue."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef claim_messages(self, queue, ttl, grace, count=None):\n return queue.claim_messages(ttl, grace, count=count)", "response": "Returns a new QueueClaim object that contains the messages claimed by the server."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update_claim(self, queue, claim, ttl=None, grace=None):\n return queue.update_claim(claim, ttl=ttl, grace=grace)", "response": "Updates the specified claim in the specified queue."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncreate the Manager instances to handle monitoring.", "response": "def _configure_manager(self):\n \"\"\"\n Creates the Manager instances to handle monitoring.\n \"\"\"\n self._flavor_manager = CloudCDNFlavorManager(self,\n uri_base=\"flavors\", resource_class=CloudCDNFlavor,\n response_key=None, plural_response_key=\"flavors\")\n self._services_manager = CloudCDNServiceManager(self,\n uri_base=\"services\", resource_class=CloudCDNService,\n response_key=None, plural_response_key=\"services\")"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef create_service(self, name, flavor_id, domains, origins,\n restrictions=None, caching=None):\n \"\"\"Create a new CDN service.\n\n Arguments:\n name: The name of the service.\n flavor_id: The ID of the flavor 
to use for this service.\n domains: A list of dictionaries, each of which has a required\n key \"domain\" and optional key \"protocol\" (the default\n protocol is http).\n origins: A list of dictionaries, each of which has a required\n key \"origin\" which is the URL or IP address to pull\n origin content from. Optional keys include \"port\" to\n use a port other than the default of 80, and \"ssl\"\n to enable SSL, which is disabled by default.\n caching: An optional\n\n \"\"\"\n return self._services_manager.create(name, flavor_id, domains, origins,\n restrictions, caching)", "response": "Create a new CDN service."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete_assets(self, service_id, url=None, all=False):\n self._services_manager.delete_assets(service_id, url, all)", "response": "Delete CDN assets associated with a service_id."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef url_for(self, attr=None, filter_value=None,\n service_type=None, endpoint_type=\"publicURL\",\n service_name=None, volume_service_name=None):\n \"\"\"Fetches the public URL from the given service for\n a particular endpoint attribute. If none given, returns\n the first. 
See tests for sample service catalog.\"\"\"\n matching_endpoints = []\n # We don't always get a service catalog back ...\n if \"serviceCatalog\" not in self.catalog[\"access\"]:\n return None\n\n # Full catalog ...\n catalog = self.catalog[\"access\"][\"serviceCatalog\"]\n for service in catalog:\n if service.get(\"type\") != service_type:\n continue\n endpoints = service[\"endpoints\"]\n for endpoint in endpoints:\n if not filter_value or endpoint.get(attr) == filter_value:\n endpoint[\"serviceName\"] = service.get(\"name\")\n matching_endpoints.append(endpoint)\n\n if not matching_endpoints:\n raise exc.EndpointNotFound()\n elif len(matching_endpoints) > 1:\n raise exc.AmbiguousEndpoints(endpoints=matching_endpoints)\n else:\n return matching_endpoints[0][endpoint_type]", "response": "Fetches the public URL for a particular endpoint attribute. If none given returns the first endpoint."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget the usage records for this load balancer.", "response": "def get_usage(self, start=None, end=None):\n \"\"\"\n Return the usage records for this load balancer. You may optionally\n include a start datetime or an end datetime, or both, which will limit\n the records to those on or after the start time, and those before or on\n the end time. 
These times should be Python datetime.datetime objects,\n Python datetime.date objects, or strings in the format:\n \"YYYY-MM-DD HH:MM:SS\" or \"YYYY-MM-DD\".\n \"\"\"\n return self.manager.get_usage(self, start=start, end=end)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _add_details(self, info):\n for (key, val) in six.iteritems(info):\n if key == \"nodes\":\n val = [Node(parent=self, **nd) for nd in val]\n elif key == \"sessionPersistence\":\n val = val['persistenceType']\n elif key == \"cluster\":\n val = val['name']\n elif key == \"virtualIps\":\n key = \"virtual_ips\"\n val = [VirtualIP(parent=self, **vip) for vip in val]\n setattr(self, key, val)", "response": "Override the base behavior to add Nodes VirtualIPs etc."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update(self, name=None, algorithm=None, protocol=None, halfClosed=None,\n port=None, timeout=None, httpsRedirect=None):\n \"\"\"\n Provides a way to modify the following attributes of a load balancer:\n - name\n - algorithm\n - protocol\n - halfClosed\n - port\n - timeout\n - httpsRedirect\n \"\"\"\n return self.manager.update(self, name=name, algorithm=algorithm,\n protocol=protocol, halfClosed=halfClosed, port=port,\n timeout=timeout, httpsRedirect=httpsRedirect)", "response": "Updates the attributes of the current load balancer."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef update_node(self, node, diff=None):\n return self.manager.update_node(node, diff=diff)", "response": "Updates the attributes of a node."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_health_monitor(self, type, delay=10, timeout=10,\n attemptsBeforeDeactivation=3, path=\"/\", statusRegex=None,\n bodyRegex=None, hostHeader=None):\n \"\"\"\n Adds a health monitor to the load balancer. 
If a monitor already\n exists, it is updated with the supplied settings.\n \"\"\"\n abd = attemptsBeforeDeactivation\n return self.manager.add_health_monitor(self, type=type, delay=delay,\n timeout=timeout, attemptsBeforeDeactivation=abd,\n path=path, statusRegex=statusRegex, bodyRegex=bodyRegex,\n hostHeader=hostHeader)", "response": "Adds a health monitor to the load balancer."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef add_connection_throttle(self, maxConnectionRate=None,\n maxConnections=None, minConnections=None, rateInterval=None):\n \"\"\"\n Updates the connection throttling information for the load balancer with\n the supplied values. At least one of the parameters must be supplied.\n \"\"\"\n if not any((maxConnectionRate, maxConnections, minConnections,\n rateInterval)):\n # Pointless call\n return\n return self.manager.add_connection_throttle(self,\n maxConnectionRate=maxConnectionRate, maxConnections=maxConnections,\n minConnections=minConnections, rateInterval=rateInterval)", "response": "Adds a connection throttling information to the load balancer with the supplied parameters."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef add_ssl_termination(self, securePort, privatekey, certificate,\n intermediateCertificate=None, enabled=True,\n secureTrafficOnly=False):\n \"\"\"\n Adds SSL termination information to the load balancer. 
If SSL\n termination has already been configured, it is updated with the\n supplied settings.\n \"\"\"\n return self.manager.add_ssl_termination(self, securePort=securePort,\n privatekey=privatekey, certificate=certificate,\n intermediateCertificate=intermediateCertificate,\n enabled=enabled, secureTrafficOnly=secureTrafficOnly)", "response": "Adds SSL termination information to the load balancer."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nupdates existing SSL termination information for the load balancer.", "response": "def update_ssl_termination(self, securePort=None, enabled=None,\n secureTrafficOnly=None):\n \"\"\"\n Updates existing SSL termination information for the load balancer\n without affecting the existing certificates/keys.\n \"\"\"\n return self.manager.update_ssl_termination(self, securePort=securePort,\n enabled=enabled, secureTrafficOnly=secureTrafficOnly)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef set_metadata_for_node(self, node, metadata):\n return self.manager.set_metadata(self, metadata, node=node)", "response": "Sets the metadata for the specified node to the supplied dictionary containing the node s metadata."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update_metadata_for_node(self, node, metadata):\n return self.manager.update_metadata(self, metadata, node=node)", "response": "Updates the existing metadata for the specified node with the supplied dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef delete_metadata_for_node(self, node, keys=None):\n return self.manager.delete_metadata(self, keys=keys, node=node)", "response": "Deletes metadata items for the specified node."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nupdating the attributes of a load 
balancer.", "response": "def update(self, lb, name=None, algorithm=None, protocol=None,\n halfClosed=None, port=None, timeout=None, httpsRedirect=None):\n \"\"\"\n Provides a way to modify the following attributes of a load balancer:\n - name\n - algorithm\n - protocol\n - halfClosed\n - port\n - timeout\n - httpsRedirect\n \"\"\"\n body = {}\n if name is not None:\n body[\"name\"] = name\n if algorithm is not None:\n body[\"algorithm\"] = algorithm\n if protocol is not None:\n body[\"protocol\"] = protocol\n if halfClosed is not None:\n body[\"halfClosed\"] = halfClosed\n if port is not None:\n body[\"port\"] = port\n if timeout is not None:\n body[\"timeout\"] = timeout\n if httpsRedirect is not None:\n body[\"httpsRedirect\"] = httpsRedirect\n if not body:\n # Nothing passed\n return\n body = {\"loadBalancer\": body}\n uri = \"/loadbalancers/%s\" % utils.get_id(lb)\n try:\n resp, resp_body = self.api.method_put(uri, body=body)\n except exc.ClientException as e:\n message = e.message\n details = e.details\n if message and details:\n errmsg = \"%s - %s\" % (message, details)\n else:\n errmsg = message\n raise exc.InvalidLoadBalancerParameters(errmsg)\n return resp, resp_body"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _create_body(self, name, port=None, protocol=None, nodes=None,\n virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,\n connectionLogging=None, connectionThrottle=None, healthMonitor=None,\n metadata=None, timeout=None, sessionPersistence=None,\n httpsRedirect=None):\n \"\"\"\n Used to create the dict required to create a load balancer instance.\n \"\"\"\n required = (virtual_ips, port, protocol)\n if not all(required):\n raise exc.MissingLoadBalancerParameters(\"Load Balancer creation \"\n \"requires at least one virtual IP, a protocol, and a port.\")\n nodes = utils.coerce_to_list(nodes)\n virtual_ips = utils.coerce_to_list(virtual_ips)\n bad_conditions = 
[node.condition for node in nodes\n if node.condition.upper() not in (\"ENABLED\", \"DISABLED\")]\n if bad_conditions:\n raise exc.InvalidNodeCondition(\"Nodes for new load balancer must be \"\n \"created in either 'ENABLED' or 'DISABLED' condition; \"\n \"received the following invalid conditions: %s\" %\n \", \".join(set(bad_conditions)))\n node_dicts = [nd.to_dict() for nd in nodes]\n vip_dicts = [vip.to_dict() for vip in virtual_ips]\n body = {\"loadBalancer\": {\n \"name\": name,\n \"port\": port,\n \"protocol\": protocol,\n \"nodes\": node_dicts,\n \"virtualIps\": vip_dicts,\n \"algorithm\": algorithm or \"RANDOM\",\n \"halfClosed\": halfClosed,\n \"accessList\": accessList,\n \"connectionLogging\": connectionLogging,\n \"connectionThrottle\": connectionThrottle,\n \"healthMonitor\": healthMonitor,\n \"metadata\": metadata,\n \"timeout\": timeout,\n \"sessionPersistence\": sessionPersistence,\n \"httpsRedirect\": httpsRedirect,\n }}\n return body", "response": "Create the dict required to create a load balancer instance."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef add_nodes(self, lb, nodes):\n if not isinstance(nodes, (list, tuple)):\n nodes = [nodes]\n node_dicts = [nd.to_dict() for nd in nodes]\n resp, body = self.api.method_post(\"/loadbalancers/%s/nodes\" % lb.id,\n body={\"nodes\": node_dicts})\n return resp, body", "response": "Adds the list of nodes to the specified load balancer."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nremove the node from its load balancer.", "response": "def delete_node(self, loadbalancer, node):\n \"\"\"Removes the node from its load balancer.\"\"\"\n lb = node.parent\n if not lb:\n raise exc.UnattachedNode(\"No parent Load Balancer for this node \"\n \"could be determined.\")\n resp, body = self.api.method_delete(\"/loadbalancers/%s/nodes/%s\" %\n (lb.id, node.id))\n return resp, body"} {"SOURCE": "codesearchnet", "instruction": 
"Here you have a function in Python 3, explain what it does\ndef update_node(self, node, diff=None):\n lb = node.parent\n if not lb:\n raise exc.UnattachedNode(\"No parent Load Balancer for this node \"\n \"could be determined.\")\n if diff is None:\n diff = node._diff()\n req_body = {\"node\": diff}\n resp, body = self.api.method_put(\"/loadbalancers/%s/nodes/%s\" %\n (lb.id, node.id), body=req_body)\n return resp, body", "response": "Updates the attributes of a node."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nadds the VirtualIP to the specified load balancer.", "response": "def add_virtualip(self, lb, vip):\n \"\"\"Adds the VirtualIP to the specified load balancer.\"\"\"\n resp, body = self.api.method_post(\"/loadbalancers/%s/virtualips\" % lb.id,\n body=vip.to_dict())\n return resp, body"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef delete_virtualip(self, loadbalancer, vip):\n lb = vip.parent\n if not lb:\n raise exc.UnattachedVirtualIP(\"No parent Load Balancer for this \"\n \"VirtualIP could be determined.\")\n resp, body = self.api.method_delete(\"/loadbalancers/%s/virtualips/%s\" %\n (lb.id, vip.id))\n return resp, body", "response": "Deletes the VirtualIP from its load balancer."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_access_list(self, loadbalancer, access_list):\n req_body = {\"accessList\": access_list}\n uri = \"/loadbalancers/%s/accesslist\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_post(uri, body=req_body)\n return body", "response": "Adds the provided list to the load balancer."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nremove the access list from the load balancer.", "response": "def delete_access_list(self, loadbalancer):\n \"\"\"\n Removes the access list from this load balancer.\n \"\"\"\n uri = 
\"/loadbalancers/%s/accesslist\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_delete(uri)\n return body"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef delete_access_list_items(self, loadbalancer, item_ids):\n if not isinstance(item_ids, (list, tuple)):\n item_ids = [item_ids]\n valid_ids = [itm[\"id\"] for itm in self.get_access_list(loadbalancer)]\n bad_ids = [str(itm) for itm in item_ids if itm not in valid_ids]\n if bad_ids:\n raise exc.AccessListIDNotFound(\"The following ID(s) are not valid \"\n \"Access List items: %s\" % \", \".join(bad_ids))\n items = \"&\".join([\"id=%s\" % item_id for item_id in item_ids])\n uri = \"/loadbalancers/%s/accesslist?%s\" % (\n utils.get_id(loadbalancer), items)\n # TODO: add the item ids\n resp, body = self.api.method_delete(uri)\n return body", "response": "Removes the items from the load balancer s access list that match the provided IDs."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_health_monitor(self, loadbalancer):\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body.get(\"healthMonitor\", {})", "response": "Returns the health monitor configuration for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,\n attemptsBeforeDeactivation=3, path=\"/\", statusRegex=None,\n bodyRegex=None, hostHeader=None):\n \"\"\"\n Adds a health monitor to the load balancer. 
If a monitor already\n exists, it is updated with the supplied settings.\n \"\"\"\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n req_body = {\"healthMonitor\": {\n \"type\": type,\n \"delay\": delay,\n \"timeout\": timeout,\n \"attemptsBeforeDeactivation\": attemptsBeforeDeactivation,\n }}\n uptype = type.upper()\n if uptype.startswith(\"HTTP\"):\n lb = self._get_lb(loadbalancer)\n if uptype != lb.protocol:\n raise exc.ProtocolMismatch(\"Cannot set the Health Monitor type \"\n \"to '%s' when the Load Balancer's protocol is '%s'.\" %\n (type, lb.protocol))\n if not all((path, statusRegex, bodyRegex)):\n raise exc.MissingHealthMonitorSettings(\"When creating an HTTP(S) \"\n \"monitor, you must provide the 'path', 'statusRegex' and \"\n \"'bodyRegex' parameters.\")\n body_hm = req_body[\"healthMonitor\"]\n body_hm[\"path\"] = path\n body_hm[\"statusRegex\"] = statusRegex\n body_hm[\"bodyRegex\"] = bodyRegex\n if hostHeader:\n body_hm[\"hostHeader\"] = hostHeader\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "response": "Adds a health monitor to the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef add_connection_throttle(self, loadbalancer, maxConnectionRate=None,\n maxConnections=None, minConnections=None, rateInterval=None):\n \"\"\"\n Creates or updates the connection throttling information for the load\n balancer. When first creating the connection throttle, all 4 parameters\n must be supplied. 
When updating an existing connection throttle, at\n least one of the parameters must be supplied.\n \"\"\"\n settings = {}\n if maxConnectionRate:\n settings[\"maxConnectionRate\"] = maxConnectionRate\n if maxConnections:\n settings[\"maxConnections\"] = maxConnections\n if minConnections:\n settings[\"minConnections\"] = minConnections\n if rateInterval:\n settings[\"rateInterval\"] = rateInterval\n req_body = {\"connectionThrottle\": settings}\n uri = \"/loadbalancers/%s/connectionthrottle\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "response": "Adds or updates the connection throttling information for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_ssl_termination(self, loadbalancer):\n uri = \"/loadbalancers/%s/ssltermination\" % utils.get_id(loadbalancer)\n try:\n resp, body = self.api.method_get(uri)\n except exc.NotFound:\n # For some reason, instead of returning an empty dict like the\n # other API GET calls, this raises a 404.\n return {}\n return body.get(\"sslTermination\", {})", "response": "Returns a dictionary representing the SSL termination configuration for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_ssl_termination(self, loadbalancer, securePort, privatekey, certificate,\n intermediateCertificate, enabled=True, secureTrafficOnly=False):\n \"\"\"\n Adds SSL termination information to the load balancer. 
If SSL termination\n has already been configured, it is updated with the supplied settings.\n \"\"\"\n uri = \"/loadbalancers/%s/ssltermination\" % utils.get_id(loadbalancer)\n req_body = {\"sslTermination\": {\n \"certificate\": certificate,\n \"enabled\": enabled,\n \"secureTrafficOnly\": secureTrafficOnly,\n \"privatekey\": privatekey,\n \"intermediateCertificate\": intermediateCertificate,\n \"securePort\": securePort,\n }}\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "response": "Adds SSL termination information to the load balancer."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,\n secureTrafficOnly=None):\n \"\"\"\n Updates existing SSL termination information for the load balancer\n without affecting the existing certificates/keys.\n \"\"\"\n ssl_info = self.get_ssl_termination(loadbalancer)\n if not ssl_info:\n raise exc.NoSSLTerminationConfiguration(\"You must configure SSL \"\n \"termination on this load balancer before attempting \"\n \"to update it.\")\n if securePort is None:\n securePort = ssl_info[\"securePort\"]\n if enabled is None:\n enabled = ssl_info[\"enabled\"]\n if secureTrafficOnly is None:\n secureTrafficOnly = ssl_info[\"secureTrafficOnly\"]\n uri = \"/loadbalancers/%s/ssltermination\" % utils.get_id(loadbalancer)\n req_body = {\"sslTermination\": {\n \"enabled\": enabled,\n \"secureTrafficOnly\": secureTrafficOnly,\n \"securePort\": securePort,\n }}\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "response": "Updates the SSL termination information for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_metadata(self, loadbalancer, node=None, raw=False):\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata\" % (\n utils.get_id(loadbalancer), 
utils.get_id(node))\n else:\n uri = \"/loadbalancers/%s/metadata\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n meta = body.get(\"metadata\", [])\n if raw:\n return meta\n ret = dict([(itm[\"key\"], itm[\"value\"]) for itm in meta])\n return ret", "response": "Returns the current metadata for the load balancer. If node is provided returns the current metadata for that node. If raw is True returns the current metadata for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef set_metadata(self, loadbalancer, metadata, node=None):\n # Delete any existing metadata\n self.delete_metadata(loadbalancer, node=node)\n # Convert the metadata dict into the list format\n metadata_list = [{\"key\": key, \"value\": val}\n for key, val in metadata.items()]\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata\" % (\n utils.get_id(loadbalancer), utils.get_id(node))\n else:\n uri = \"/loadbalancers/%s/metadata\" % utils.get_id(loadbalancer)\n req_body = {\"metadata\": metadata_list}\n resp, body = self.api.method_post(uri, body=req_body)\n return body", "response": "Sets the metadata for the load balancer to the supplied dictionary."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the existing metadata with the supplied dictionary.", "response": "def update_metadata(self, loadbalancer, metadata, node=None):\n \"\"\"\n Updates the existing metadata with the supplied dictionary. 
If\n 'node' is supplied, the metadata for that node is updated instead\n of for the load balancer.\n \"\"\"\n # Get the existing metadata\n md = self.get_metadata(loadbalancer, raw=True)\n id_lookup = dict([(itm[\"key\"], itm[\"id\"]) for itm in md])\n metadata_list = []\n # Updates must be done individually\n for key, val in metadata.items():\n try:\n meta_id = id_lookup[key]\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata/%s\" % (\n utils.get_id(loadbalancer), utils.get_id(node),\n meta_id)\n else:\n uri = \"/loadbalancers/%s/metadata\" % utils.get_id(loadbalancer)\n req_body = {\"meta\": {\"value\": val}}\n resp, body = self.api.method_put(uri, body=req_body)\n except KeyError:\n # Not an existing key; add to metadata_list\n metadata_list.append({\"key\": key, \"value\": val})\n if metadata_list:\n # New items; POST them\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata\" % (\n utils.get_id(loadbalancer), utils.get_id(node))\n else:\n uri = \"/loadbalancers/%s/metadata\" % utils.get_id(loadbalancer)\n req_body = {\"metadata\": metadata_list}\n resp, body = self.api.method_post(uri, body=req_body)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ndelete the metadata for the specified keys from the load balancer.", "response": "def delete_metadata(self, loadbalancer, keys=None, node=None):\n \"\"\"\n Deletes metadata items specified by the 'keys' parameter. If no value\n for 'keys' is provided, all metadata is deleted. If 'node' is supplied,\n the metadata for that node is deleted instead of the load balancer.\n \"\"\"\n if keys and not isinstance(keys, (list, tuple)):\n keys = [keys]\n md = self.get_metadata(loadbalancer, node=node, raw=True)\n if keys:\n md = [dct for dct in md if dct[\"key\"] in keys]\n if not md:\n # Nothing to do; log it? 
Raise an error?\n return\n id_list = \"&\".join([\"id=%s\" % itm[\"id\"] for itm in md])\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata?%s\" % (\n utils.get_id(loadbalancer), utils.get_id(node), id_list)\n else:\n uri = \"/loadbalancers/%s/metadata?%s\" % (\n utils.get_id(loadbalancer), id_list)\n resp, body = self.api.method_delete(uri)\n return body"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_error_page(self, loadbalancer):\n uri = \"/loadbalancers/%s/errorpage\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body", "response": "Returns the default error page for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nsetting the custom error page for the load balancer.", "response": "def set_error_page(self, loadbalancer, html):\n \"\"\"\n A single custom error page may be added per account load balancer\n with an HTTP protocol. 
Page updates will override existing content.\n If a custom error page is deleted, or the load balancer is changed\n to a non-HTTP protocol, the default error page will be restored.\n \"\"\"\n uri = \"/loadbalancers/%s/errorpage\" % utils.get_id(loadbalancer)\n req_body = {\"errorpage\": {\"content\": html}}\n resp, body = self.api.method_put(uri, body=req_body)\n return body"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_usage(self, loadbalancer=None, start=None, end=None):\n if start is end is None:\n period = None\n else:\n parts = []\n startStr = utils.iso_time_string(start)\n if startStr:\n parts.append(\"startTime=%s\" % startStr)\n endStr = utils.iso_time_string(end)\n if endStr:\n parts.append(\"endTime=%s\" % endStr)\n period = \"&\".join(parts).strip(\"&\")\n if loadbalancer is None:\n uri = \"/loadbalancers/usage\"\n else:\n uri = \"/loadbalancers/%s/usage\" % utils.get_id(loadbalancer)\n if period:\n uri = \"%s?%s\" % (uri, period)\n resp, body = self.api.method_get(uri)\n return body", "response": "Returns the usage records for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the session persistence setting for the given load balancer.", "response": "def get_session_persistence(self, loadbalancer):\n \"\"\"\n Returns the session persistence setting for the given load balancer.\n \"\"\"\n uri = \"/loadbalancers/%s/sessionpersistence\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n ret = body[\"sessionPersistence\"].get(\"persistenceType\", \"\")\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nsets the session persistence for the given load balancer.", "response": "def set_session_persistence(self, loadbalancer, val):\n \"\"\"\n Sets the session persistence for the given load balancer.\n \"\"\"\n val = val.upper()\n uri = \"/loadbalancers/%s/sessionpersistence\" % 
utils.get_id(loadbalancer)\n req_body = {\"sessionPersistence\": {\n \"persistenceType\": val,\n }}\n resp, body = self.api.method_put(uri, body=req_body)\n return body"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the connection logging settings for the given load balancer.", "response": "def get_connection_logging(self, loadbalancer):\n \"\"\"\n Returns the connection logging setting for the given load balancer.\n \"\"\"\n uri = \"/loadbalancers/%s/connectionlogging\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n ret = body.get(\"connectionLogging\", {}).get(\"enabled\", False)\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef set_connection_logging(self, loadbalancer, val):\n uri = \"/loadbalancers/%s/connectionlogging\" % utils.get_id(loadbalancer)\n val = str(val).lower()\n req_body = {\"connectionLogging\": {\n \"enabled\": val,\n }}\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "response": "Sets the connection logging for the given load balancer."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns either a loadbalancer or the ID of a CloudLoadBalancer instance.", "response": "def _get_lb(self, lb_or_id):\n \"\"\"\n Accepts either a loadbalancer or the ID of a loadbalancer, and returns\n the CloudLoadBalancer instance.\n \"\"\"\n if isinstance(lb_or_id, CloudLoadBalancer):\n ret = lb_or_id\n else:\n ret = self.get(lb_or_id)\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nconverts this Node to a dict representation for passing to the API.", "response": "def to_dict(self):\n \"\"\"Convert this Node to a dict representation for passing to the API.\"\"\"\n return {\"address\": self.address,\n \"port\": self.port,\n \"condition\": self.condition,\n \"type\": self.type,\n \"id\": self.id,\n }"} 
{"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the load object with the current values.", "response": "def update(self):\n \"\"\"\n Pushes any local changes to the object up to the actual load\n balancer node.\n \"\"\"\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "response": "Returns a reference to the device that is represented by this node."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef to_dict(self):\n if self.id:\n return {\"id\": self.id}\n return {\"type\": self.type, \"ipVersion\": self.ip_version}", "response": "Convert this VirtualIP to a dict representation for passing to the API."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _configure_manager(self):\n self._manager = CloudLoadBalancerManager(self,\n resource_class=CloudLoadBalancer,\n response_key=\"loadBalancer\", uri_base=\"loadbalancers\")", "response": "Creates a manager to handle the load balancers."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_usage(self, loadbalancer=None, start=None, end=None):\n return self._manager.get_usage(loadbalancer=loadbalancer, start=start,\n end=end)", "response": "Returns the usage records for this load balancer."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef allowed_domains(self):\n if self._allowed_domains is None:\n uri = \"/loadbalancers/alloweddomains\"\n resp, body = self.method_get(uri)\n dom_list = 
body[\"allowedDomains\"]\n self._allowed_domains = [itm[\"allowedDomain\"][\"name\"]\n for itm in dom_list]\n return self._allowed_domains", "response": "This property lists the allowed domains for a load balancer."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef algorithms(self):\n if self._algorithms is None:\n uri = \"/loadbalancers/algorithms\"\n resp, body = self.method_get(uri)\n self._algorithms = [alg[\"name\"] for alg in body[\"algorithms\"]]\n return self._algorithms", "response": "Returns a list of available load balancing algorithms."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef protocols(self):\n if self._protocols is None:\n uri = \"/loadbalancers/protocols\"\n resp, body = self.method_get(uri)\n self._protocols = [proto[\"name\"] for proto in body[\"protocols\"]]\n return self._protocols", "response": "Returns a list of available load balancing protocols."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update(self, loadbalancer, name=None, algorithm=None, protocol=None,\n halfClosed=None, port=None, timeout=None, httpsRedirect=None):\n \"\"\"\n Provides a way to modify the following attributes of a load balancer:\n - name\n - algorithm\n - protocol\n - halfClosed\n - port\n - timeout\n - httpsRedirect\n \"\"\"\n return self._manager.update(loadbalancer, name=name,\n algorithm=algorithm, protocol=protocol, halfClosed=halfClosed,\n port=port, timeout=timeout, httpsRedirect=httpsRedirect)", "response": "Updates the attributes of a load balancer."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef add_connection_throttle(self, loadbalancer, maxConnectionRate=None,\n maxConnections=None, minConnections=None, rateInterval=None):\n \"\"\"\n Updates the connection throttling information for the 
load balancer with\n the supplied values. At least one of the parameters must be supplied.\n \"\"\"\n return loadbalancer.add_connection_throttle(\n maxConnectionRate=maxConnectionRate, maxConnections=maxConnections,\n minConnections=minConnections, rateInterval=rateInterval)", "response": "Adds a connection throttling information to the load balancer."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,\n secureTrafficOnly=None):\n \"\"\"\n Updates existing SSL termination information for the load balancer\n without affecting the existing certificates/keys.\n \"\"\"\n return loadbalancer.update_ssl_termination(securePort=securePort,\n enabled=enabled, secureTrafficOnly=secureTrafficOnly)", "response": "Updates existing SSL termination information for the load balancer."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef delete_metadata_for_node(self, loadbalancer, node, keys=None):\n return loadbalancer.delete_metadata_for_node(node, keys=keys)", "response": "Deletes metadata items specified by the keys parameter for the specified node. 
If no keys is provided all metadata is deleted."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn an instance of a ClientException or subclass based on an httplib2 response.", "response": "def from_response(response, body):\n \"\"\"\n Return an instance of a ClientException or subclass\n based on an httplib2 response.\n\n Usage::\n\n resp, body = http.request(...)\n if resp.status_code != 200:\n raise exception_from_response(resp, body)\n \"\"\"\n if isinstance(response, dict):\n status = response.get(\"status_code\")\n else:\n status = response.status_code\n cls = _code_map.get(int(status), ClientException)\n\n# import pyrax\n# pyrax.utils.trace()\n\n request_id = response.headers.get(\"x-compute-request-id\")\n if body:\n message = \"n/a\"\n details = \"n/a\"\n if isinstance(body, dict):\n message = body.get(\"message\")\n details = body.get(\"details\")\n if message is details is None:\n error = body[next(iter(body))]\n if isinstance(error, dict):\n message = error.get(\"message\", None)\n details = error.get(\"details\", None)\n else:\n message = error\n details = None\n else:\n message = body\n return cls(code=status, message=message, details=details,\n request_id=request_id)\n else:\n return cls(code=status, request_id=request_id)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nconvert a image ID passed as the 'image' parameter to a image object.", "response": "def assure_image(fnc):\n \"\"\"\n Converts a image ID passed as the 'image' parameter to a image object.\n \"\"\"\n @wraps(fnc)\n def _wrapped(self, img, *args, **kwargs):\n if not isinstance(img, Image):\n # Must be the ID\n img = self._manager.get(img)\n return fnc(self, img, *args, **kwargs)\n return _wrapped"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list(self, limit=None, marker=None, name=None, visibility=None,\n member_status=None, owner=None, tag=None, 
status=None,\n size_min=None, size_max=None, sort_key=None, sort_dir=None,\n return_raw=False):\n \"\"\"\n Returns a list of resource objects. Pagination is supported through the\n optional 'marker' and 'limit' parameters. Filtering the returned value\n is possible by specifying values for any of the other parameters.\n \"\"\"\n uri = \"/%s\" % self.uri_base\n qs = utils.dict_to_qs(dict(limit=limit, marker=marker, name=name,\n visibility=visibility, member_status=member_status,\n owner=owner, tag=tag, status=status, size_min=size_min,\n size_max=size_max, sort_key=sort_key, sort_dir=sort_dir))\n if qs:\n uri = \"%s?%s\" % (uri, qs)\n return self._list(uri, return_raw=return_raw)", "response": "Returns a list of resource objects in the base system."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_all(self, name=None, visibility=None, member_status=None,\n owner=None, tag=None, status=None, size_min=None, size_max=None,\n sort_key=None, sort_dir=None):\n \"\"\"\n Returns all of the images in one call, rather than in paginated batches.\n \"\"\"\n\n def strip_version(uri):\n \"\"\"\n The 'next' uri contains a redundant version number. 
We need to\n strip it to use in the method_get() call.\n \"\"\"\n pos = uri.find(\"/images\")\n return uri[pos:]\n\n obj_class = self.resource_class\n resp, resp_body = self.list(name=name, visibility=visibility,\n member_status=member_status, owner=owner, tag=tag,\n status=status, size_min=size_min, size_max=size_max,\n sort_key=sort_key, sort_dir=sort_dir, return_raw=True)\n data = resp_body.get(self.plural_response_key, resp_body)\n next_uri = strip_version(resp_body.get(\"next\", \"\"))\n ret = [obj_class(manager=self, info=res) for res in data if res]\n while next_uri:\n resp, resp_body = self.api.method_get(next_uri)\n data = resp_body.get(self.plural_response_key, resp_body)\n next_uri = strip_version(resp_body.get(\"next\", \"\"))\n ret.extend([obj_class(manager=self, info=res)\n for res in data if res])\n return ret", "response": "Returns all of the images in one call."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncreating a new image with the specified name.", "response": "def create(self, name, img_format=None, img_container_format=None,\n data=None, container=None, obj=None, metadata=None):\n \"\"\"\n Creates a new image with the specified name. The image data can either\n be supplied directly in the 'data' parameter, or it can be an image\n stored in the object storage service. 
In the case of the latter, you\n can either supply the container and object names, or simply a\n StorageObject reference.\n\n You may specify the image and image container formats; if unspecified,\n the default of \"vhd\" for image format and \"bare\" for image container\n format will be used.\n\n NOTE: This is blocking, and may take a while to complete.\n \"\"\"\n if img_format is None:\n img_format = \"vhd\"\n if img_container_format is None:\n img_container_format = \"bare\"\n headers = {\n \"X-Image-Meta-name\": name,\n \"X-Image-Meta-disk_format\": img_format,\n \"X-Image-Meta-container_format\": img_container_format,\n }\n if data:\n img_data = data\n else:\n ident = self.api.identity\n region = self.api.region_name\n clt = ident.get_client(\"object_store\", region)\n if not isinstance(obj, StorageObject):\n obj = clt.get_object(container, obj)\n img_data = obj.fetch()\n uri = \"%s/images\" % self.uri_base\n resp, resp_body = self.api.method_post(uri, headers=headers,\n data=img_data)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nupdate the internal cache of an image.", "response": "def update(self, img, value_dict):\n \"\"\"\n Accepts an image reference (object or ID) and dictionary of key/value\n pairs, where the key is an attribute of the image, and the value is the\n desired new value for that image.\n\n NOTE: There is a bug in Glance where the 'add' operation returns a 409\n if the property already exists, which conflicts with the spec. 
So to\n get around this a fresh copy of the image must be retrieved, and the\n value of 'op' must be determined based on whether this attribute exists\n or not.\n \"\"\"\n img = self.get(img)\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(img))\n body = []\n for key, val in value_dict.items():\n op = \"replace\" if key in img.__dict__ else \"add\"\n body.append({\"op\": op,\n \"path\": \"/%s\" % key,\n \"value\": val})\n headers = {\"Content-Type\":\n \"application/openstack-images-v2.1-json-patch\"}\n resp, resp_body = self.api.method_patch(uri, body=body, headers=headers)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update_image_member(self, img_id, status):\n if status not in (\"pending\", \"accepted\", \"rejected\"):\n raise exc.InvalidImageMemberStatus(\"The status value must be one \"\n \"of 'accepted', 'rejected', or 'pending'. Received: '%s'\" %\n status)\n api = self.api\n project_id = api.identity.tenant_id\n uri = \"/%s/%s/members/%s\" % (self.uri_base, img_id, project_id)\n body = {\"status\": status}\n try:\n resp, resp_body = self.api.method_put(uri, body=body)\n except exc.NotFound as e:\n raise exc.InvalidImageMember(\"The update member request could not \"\n \"be completed. 
No member request for that image was found.\")", "response": "Updates the image whose ID is given with the status specified."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nwrapping the default create call to handle exceptions.", "response": "def create(self, name, *args, **kwargs):\n \"\"\"\n Need to wrap the default call to handle exceptions.\n \"\"\"\n try:\n return super(ImageMemberManager, self).create(name, *args, **kwargs)\n except Exception as e:\n if e.http_status == 403:\n raise exc.UnsharableImage(\"You cannot share a public image.\")\n else:\n raise"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _create_body(self, name, img=None, cont=None, img_format=None,\n img_name=None):\n \"\"\"\n Used to create a new task. Since tasks don't have names, the required\n 'name' parameter is used for the type of task: 'import' or 'export'.\n \"\"\"\n img = utils.get_id(img)\n cont = utils.get_name(cont)\n body = {\"type\": name}\n if name == \"export\":\n body[\"input\"] = {\n \"image_uuid\": img,\n \"receiving_swift_container\": cont}\n else:\n nm = \"%s/%s\" % (cont, utils.get_name(img))\n body[\"input\"] = {\n \"image_properties\": {\"name\": img_name or img},\n \"import_from\": nm,\n \"import_from_format\": img_format or DEFAULT_FORMAT}\n return body", "response": "Create the body for the new task."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef images(self):\n uri = \"/%s/images\" % self.uri_base\n resp, resp_body = self.api.method_get(uri)\n return resp_body", "response": "Returns a json - schema document that represents an image members entity which is a container of image members entities."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef image(self):\n uri = \"/%s/image\" % self.uri_base\n resp, resp_body = self.api.method_get(uri)\n 
return resp_body", "response": "Returns a json - schema document that represents a single image entity."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef image_members(self):\n uri = \"/%s/members\" % self.uri_base\n resp, resp_body = self.api.method_get(uri)\n return resp_body", "response": "Returns a json - schema document that represents an image members entity."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a json - schema document that represents an image member entity.", "response": "def image_member(self):\n \"\"\"\n Returns a json-schema document that represents an image member entity.\n (a container of member entities).\n \"\"\"\n uri = \"/%s/member\" % self.uri_base\n resp, resp_body = self.api.method_get(uri)\n return resp_body"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef image_tasks(self):\n uri = \"/%s/tasks\" % self.uri_base\n resp, resp_body = self.api.method_get(uri)\n return resp_body", "response": "Returns a json - schema document that represents a container of tasks\n entities."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn a json - schema document that represents an image task entity.", "response": "def image_task(self):\n \"\"\"\n Returns a json-schema document that represents an task entity.\n \"\"\"\n uri = \"/%s/task\" % self.uri_base\n resp, resp_body = self.api.method_get(uri)\n return resp_body"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates the manager to handle queues.", "response": "def _configure_manager(self):\n \"\"\"\n Create the manager to handle queues.\n \"\"\"\n self._manager = ImageManager(self, resource_class=Image,\n response_key=\"\", plural_response_key=\"images\",\n uri_base=\"images\")\n self._tasks_manager = ImageTasksManager(self, 
resource_class=ImageTask,\n response_key=\"\", plural_response_key=\"tasks\",\n uri_base=\"tasks\")\n self._schema_manager = JSONSchemaManager(self, resource_class=None,\n response_key=\"\", plural_response_key=\"\", uri_base=\"schemas\")"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a list of resource objects in the object manager.", "response": "def list(self, limit=None, marker=None, name=None, visibility=None,\n member_status=None, owner=None, tag=None, status=None,\n size_min=None, size_max=None, sort_key=None, sort_dir=None):\n \"\"\"\n Returns a list of resource objects. Pagination is supported through the\n optional 'marker' and 'limit' parameters. Filtering the returned value\n is possible by specifying values for any of the other parameters.\n \"\"\"\n return self._manager.list(limit=limit, marker=marker, name=name,\n visibility=visibility, member_status=member_status,\n owner=owner, tag=tag, status=status, size_min=size_min,\n size_max=size_max, sort_key=sort_key, sort_dir=sort_dir)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn all of the images in one call rather than in paginated batches.", "response": "def list_all(self, name=None, visibility=None, member_status=None,\n owner=None, tag=None, status=None, size_min=None, size_max=None,\n sort_key=None, sort_dir=None):\n \"\"\"\n Returns all of the images in one call, rather than in paginated batches.\n The same filtering options available in list() apply here, with the\n obvious exception of limit and marker.\n \"\"\"\n return self._manager.list_all(name=name, visibility=visibility,\n member_status=member_status, owner=owner, tag=tag,\n status=status, size_min=size_min, size_max=size_max,\n sort_key=sort_key, sort_dir=sort_dir)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create(self, name, img_format=None, data=None, 
container=None,\n obj=None, metadata=None):\n \"\"\"\n Creates a new image with the specified name. The image data can either\n be supplied directly in the 'data' parameter, or it can be an image\n stored in the object storage service. In the case of the latter, you\n can either supply the container and object names, or simply a\n StorageObject reference.\n \"\"\"\n return self._manager.create(name, img_format, data=data,\n container=container, obj=obj)", "response": "Creates a new image with the specified name."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating a task to export the specified image to the swift container named in the cont parameter.", "response": "def export_task(self, img, cont):\n \"\"\"\n Creates a task to export the specified image to the swift container\n named in the 'cont' parameter. If the container does not exist, a\n NoSuchContainer exception is raised.\n\n The 'img' parameter can be either an Image object or the ID of an\n image. 
If these do not correspond to a valid image, a NotFound\n exception is raised.\n \"\"\"\n return self._tasks_manager.create(\"export\", img=img, cont=cont)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef import_task(self, img, cont, img_format=None, img_name=None):\n return self._tasks_manager.create(\"import\", img=img, cont=cont,\n img_format=img_format, img_name=img_name)", "response": "Creates a task to import the specified image from the container."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nchanges the value of the specified key in the current environment.", "response": "def set_setting(key, val, env=None):\n \"\"\"\n Changes the value of the specified key in the current environment, or in\n another environment if specified.\n \"\"\"\n return settings.set(key, val, env=env)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncreate a new identity class.", "response": "def create_context(id_type=None, env=None, username=None, password=None,\n tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None):\n \"\"\"\n Returns an instance of the specified identity class, or if none is\n specified, an instance of the current setting for 'identity_class'.\n\n You may optionally set the environment by passing the name of that\n environment in the 'env' parameter.\n \"\"\"\n if env:\n set_environment(env)\n return _create_identity(id_type=id_type, username=username,\n password=password, tenant_id=tenant_id, tenant_name=tenant_name,\n api_key=api_key, verify_ssl=verify_ssl, return_context=True)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncreating an identity object for the current environment.", "response": "def _create_identity(id_type=None, username=None, password=None, tenant_id=None,\n tenant_name=None, api_key=None, verify_ssl=None,\n return_context=False):\n \"\"\"\n Creates an instance of the 
current identity_class and assigns it to the\n module-level name 'identity' by default. If 'return_context' is True, the\n module-level 'identity' is untouched, and instead the instance is returned.\n \"\"\"\n if id_type:\n cls = _import_identity(id_type)\n else:\n cls = settings.get(\"identity_class\")\n if not cls:\n raise exc.IdentityClassNotDefined(\"No identity class has \"\n \"been defined for the current environment.\")\n if verify_ssl is None:\n verify_ssl = get_setting(\"verify_ssl\")\n context = cls(username=username, password=password, tenant_id=tenant_id,\n tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl)\n if return_context:\n return context\n else:\n global identity\n identity = context"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nensures that the identity attribute is not None.", "response": "def _assure_identity(fnc):\n \"\"\"Ensures that the 'identity' attribute is not None.\"\"\"\n def _wrapped(*args, **kwargs):\n if identity is None:\n _create_identity()\n return fnc(*args, **kwargs)\n return _wrapped"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _require_auth(fnc):\n @wraps(fnc)\n @_assure_identity\n def _wrapped(*args, **kwargs):\n if not identity.authenticated:\n msg = \"Authentication required before calling '%s'.\" % fnc.__name__\n raise exc.NotAuthenticated(msg)\n return fnc(*args, **kwargs)\n return _wrapped", "response": "Decorator that ensures that the user is authenticated."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _safe_region(region=None, context=None):\n ret = region or settings.get(\"region\")\n context = context or identity\n if not ret:\n # Nothing specified; get the default from the identity object.\n if not context:\n _create_identity()\n context = identity\n ret = context.get_default_region()\n if not ret:\n # Use the first 
available region\n try:\n ret = regions[0]\n except IndexError:\n ret = \"\"\n return ret", "response": "Value to use when no region is specified."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_credentials(username, api_key=None, password=None, region=None,\n tenant_id=None, authenticate=True):\n \"\"\"\n Set the credentials directly, and then try to authenticate.\n\n If the region is passed, it will authenticate against the proper endpoint\n for that region, and set the default region for connections.\n \"\"\"\n global regions, services\n pw_key = password or api_key\n region = _safe_region(region)\n tenant_id = tenant_id or settings.get(\"tenant_id\")\n identity.set_credentials(username=username, password=pw_key,\n tenant_id=tenant_id, region=region, authenticate=authenticate)\n regions = tuple(identity.regions)\n services = tuple(identity.services.keys())\n connect_to_services(region=region)", "response": "Set the credentials for the current user and connect to services."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nset the credentials for the current region.", "response": "def set_credential_file(cred_file, region=None, authenticate=True):\n \"\"\"\n Read in the credentials from the supplied file path, and then try to\n authenticate. 
The file should be a standard config file in one of the\n following formats:\n\n For Keystone authentication:\n [keystone]\n username = myusername\n password = 1234567890abcdef\n tenant_id = abcdef1234567890\n\n For Rackspace authentication:\n [rackspace_cloud]\n username = myusername\n api_key = 1234567890abcdef\n\n If the region is passed, it will authenticate against the proper endpoint\n for that region, and set the default region for connections.\n \"\"\"\n global regions, services\n region = _safe_region(region)\n identity.set_credential_file(cred_file, region=region,\n authenticate=authenticate)\n regions = tuple(identity.regions)\n services = tuple(identity.services.keys())\n connect_to_services(region=region)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef keyring_auth(username=None, region=None, authenticate=True):\n if not keyring:\n # Module not installed\n raise exc.KeyringModuleNotInstalled(\"The 'keyring' Python module is \"\n \"not installed on this system.\")\n if username is None:\n username = settings.get(\"keyring_username\")\n if not username:\n raise exc.KeyringUsernameMissing(\"No username specified for keyring \"\n \"authentication.\")\n password = keyring.get_password(\"pyrax\", username)\n if password is None:\n raise exc.KeyringPasswordNotFound(\"No password was found for the \"\n \"username '%s'.\" % username)\n set_credentials(username, password, region=region,\n authenticate=authenticate)", "response": "Authenticate against the keyring."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef clear_credentials():\n global identity, regions, services, cloudservers, cloudfiles, cloud_cdn\n global cloud_loadbalancers, cloud_databases, cloud_blockstorage, cloud_dns\n global cloud_networks, cloud_monitoring, autoscale, images, queues\n identity = None\n regions = tuple()\n services = tuple()\n cloudservers = None\n 
cloudfiles = None\n cloud_cdn = None\n cloud_loadbalancers = None\n cloud_databases = None\n cloud_blockstorage = None\n cloud_dns = None\n cloud_networks = None\n cloud_monitoring = None\n autoscale = None\n images = None\n queues = None", "response": "De - authenticate by clearing all the names back to None."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nestablish authenticated connections to the various cloud APIs.", "response": "def connect_to_services(region=None):\n \"\"\"Establishes authenticated connections to the various cloud APIs.\"\"\"\n global cloudservers, cloudfiles, cloud_loadbalancers, cloud_databases\n global cloud_blockstorage, cloud_dns, cloud_networks, cloud_monitoring\n global autoscale, images, queues, cloud_cdn\n cloudservers = connect_to_cloudservers(region=region)\n cloudfiles = connect_to_cloudfiles(region=region)\n cloud_cdn = connect_to_cloud_cdn(region=region)\n cloud_loadbalancers = connect_to_cloud_loadbalancers(region=region)\n cloud_databases = connect_to_cloud_databases(region=region)\n cloud_blockstorage = connect_to_cloud_blockstorage(region=region)\n cloud_dns = connect_to_cloud_dns(region=region)\n cloud_networks = connect_to_cloud_networks(region=region)\n cloud_monitoring = connect_to_cloud_monitoring(region=region)\n autoscale = connect_to_autoscale(region=region)\n images = connect_to_images(region=region)\n queues = connect_to_queues(region=region)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nparses the services dict to get the proper endpoint for the given service and region.", "response": "def _get_service_endpoint(context, svc, region=None, public=True):\n \"\"\"\n Parses the services dict to get the proper endpoint for the given service.\n \"\"\"\n region = _safe_region(region)\n # If a specific context is passed, use that. 
Otherwise, use the global\n # identity reference.\n context = context or identity\n url_type = {True: \"public\", False: \"private\"}[public]\n svc_obj = context.services.get(svc)\n if not svc_obj:\n return None\n ep = svc_obj.endpoints.get(region, {}).get(url_type)\n if not ep:\n # Try the \"ALL\" region, and substitute the actual region\n ep = svc_obj.endpoints.get(\"ALL\", {}).get(url_type)\n return ep"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating a client for working with cloud servers.", "response": "def connect_to_cloudservers(region=None, context=None, verify_ssl=None, **kwargs):\n \"\"\"Creates a client for working with cloud servers.\"\"\"\n context = context or identity\n _cs_auth_plugin.discover_auth_systems()\n id_type = get_setting(\"identity_type\")\n if id_type != \"keystone\":\n auth_plugin = _cs_auth_plugin.load_plugin(id_type)\n else:\n auth_plugin = None\n region = _safe_region(region, context=context)\n mgt_url = _get_service_endpoint(context, \"compute\", region)\n cloudservers = None\n if not mgt_url:\n # Service is not available\n return\n if verify_ssl is None:\n insecure = not get_setting(\"verify_ssl\")\n else:\n insecure = not verify_ssl\n try:\n extensions = nc.discover_extensions(_cs_max_version)\n except AttributeError:\n extensions = None\n clt_class = _cs_client.get_client_class(_cs_max_version)\n cloudservers = clt_class(context.username, context.password,\n project_id=context.tenant_id, auth_url=context.auth_endpoint,\n auth_system=id_type, region_name=region, service_type=\"compute\",\n auth_plugin=auth_plugin, insecure=insecure, extensions=extensions,\n http_log_debug=_http_debug, **kwargs)\n agt = cloudservers.client.USER_AGENT\n cloudservers.client.USER_AGENT = _make_agent_name(agt)\n cloudservers.client.management_url = mgt_url\n cloudservers.client.auth_token = context.token\n cloudservers.exceptions = _cs_exceptions\n # Add some convenience methods\n cloudservers.list_images = 
cloudservers.images.list\n cloudservers.list_flavors = cloudservers.flavors.list\n cloudservers.list = cloudservers.servers.list\n\n def list_base_images():\n \"\"\"\n Returns a list of all base images; excludes any images created\n by this account.\n \"\"\"\n return [image for image in cloudservers.images.list()\n if not hasattr(image, \"server\")]\n\n def list_snapshots():\n \"\"\"\n Returns a list of all images created by this account; in other words, it\n excludes all the base images.\n \"\"\"\n return [image for image in cloudservers.images.list()\n if hasattr(image, \"server\")]\n\n def find_images_by_name(expr):\n \"\"\"\n Returns a list of images whose name contains the specified expression.\n The value passed is treated as a regular expression, allowing for more\n specific searches than simple wildcards. The matching is done in a\n case-insensitive manner.\n \"\"\"\n return [image for image in cloudservers.images.list()\n if re.search(expr, image.name, re.I)]\n\n cloudservers.list_base_images = list_base_images\n cloudservers.list_snapshots = list_snapshots\n cloudservers.find_images_by_name = find_images_by_name\n cloudservers.identity = identity\n return cloudservers"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a client for working with CloudFiles.", "response": "def connect_to_cloudfiles(region=None, public=None):\n \"\"\"Creates a client for working with CloudFiles/Swift.\"\"\"\n if public is None:\n is_public = not bool(get_setting(\"use_servicenet\"))\n else:\n is_public = public\n ret = _create_client(ep_name=\"object_store\", region=region,\n public=is_public)\n if ret:\n # Add CDN endpoints, if available\n region = _safe_region(region)\n ret.cdn_management_url = _get_service_endpoint(None, \"object_cdn\",\n region, public=is_public)\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef connect_to_cloud_cdn(region=None):\n 
global default_region\n # (nicholaskuechler/keekz) 2017-11-30 - Not a very elegant solution...\n # Cloud CDN only exists in 2 regions: DFW and LON\n # But this isn't playing nicely with the identity service catalog results.\n # US auth based regions (DFW, ORD, IAD, SYD, HKG) need to use CDN in DFW\n # UK auth based regions (LON) need to use CDN in LON\n if region in ['DFW', 'IAD', 'ORD', 'SYD', 'HKG']:\n return _create_client(ep_name=\"cdn\", region=\"DFW\")\n elif region in ['LON']:\n return _create_client(ep_name=\"cdn\", region=\"LON\")\n else:\n if default_region in ['DFW', 'IAD', 'ORD', 'SYD', 'HKG']:\n return _create_client(ep_name=\"cdn\", region=\"DFW\")\n elif default_region in ['LON']:\n return _create_client(ep_name=\"cdn\", region=\"LON\")\n else:\n return _create_client(ep_name=\"cdn\", region=region)", "response": "Creates a client for working with cloud loadbalancers."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a client for working with Images.", "response": "def connect_to_images(region=None, public=True):\n \"\"\"Creates a client for working with Images.\"\"\"\n return _create_client(ep_name=\"image\", region=region, public=public)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncreating a client for working with Queues.", "response": "def connect_to_queues(region=None, public=True):\n \"\"\"Creates a client for working with Queues.\"\"\"\n return _create_client(ep_name=\"queues\", region=region, public=public)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get(self, key, env=None):\n if env is None:\n env = self.environment\n try:\n ret = self._settings[env][key]\n except KeyError:\n ret = None\n if ret is None:\n # See if it's set in the environment\n if key == \"identity_class\":\n # This is defined via the identity_type\n env_var = self.env_dct.get(\"identity_type\")\n ityp = os.environ.get(env_var)\n if ityp:\n 
return _import_identity(ityp)\n else:\n env_var = self.env_dct.get(key)\n if env_var is not None:\n ret = os.environ.get(env_var)\n return ret", "response": "Returns the value for the specified key in the specified environment."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef set(self, key, val, env=None):\n if env is None:\n env = self.environment\n else:\n if env not in self._settings:\n raise exc.EnvironmentNotFound(\"There is no environment named \"\n \"'%s'.\" % env)\n dct = self._settings[env]\n if key not in dct:\n raise exc.InvalidSetting(\"The setting '%s' is not defined.\" % key)\n dct[key] = val\n if key == \"identity_type\":\n # If setting the identity_type, also change the identity_class.\n dct[\"identity_class\"] = _import_identity(val)\n elif key == \"region\":\n if not identity:\n return\n current = identity.region\n if current == val:\n return\n if \"LON\" in (current, val):\n # This is an outlier, as it has a separate auth\n identity.region = val\n elif key == \"verify_ssl\":\n if not identity:\n return\n identity.verify_ssl = val", "response": "Sets the value for the specified key to the new value."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef read_config(self, config_file):\n cfg = ConfigParser.SafeConfigParser()\n try:\n cfg.read(config_file)\n except ConfigParser.MissingSectionHeaderError as e:\n # The file exists, but doesn't have the correct format.\n raise exc.InvalidConfigurationFile(e)\n\n def safe_get(section, option, default=None):\n try:\n return cfg.get(section, option)\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n return default\n\n # A common mistake is including credentials in the config file. 
If any\n # values are found, issue a warning so that the developer can correct\n # this problem.\n creds_found = False\n for section in cfg.sections():\n if section == \"settings\":\n section_name = \"default\"\n self._default_set = True\n else:\n section_name = section\n # Check for included credentials\n for key in (\"username\", \"password\", \"api_key\"):\n if creds_found:\n break\n if safe_get(section, key):\n creds_found = True\n dct = self._settings[section_name] = {}\n dct[\"region\"] = safe_get(section, \"region\", default_region)\n ityp = safe_get(section, \"identity_type\")\n if ityp:\n dct[\"identity_type\"] = _id_type(ityp)\n dct[\"identity_class\"] = _import_identity(ityp)\n # Handle both the old and new names for this setting.\n debug = safe_get(section, \"debug\")\n if debug is None:\n debug = safe_get(section, \"http_debug\", \"False\")\n dct[\"http_debug\"] = debug == \"True\"\n verify_ssl = safe_get(section, \"verify_ssl\", \"True\")\n dct[\"verify_ssl\"] = verify_ssl == \"True\"\n dct[\"keyring_username\"] = safe_get(section, \"keyring_username\")\n dct[\"encoding\"] = safe_get(section, \"encoding\", default_encoding)\n dct[\"auth_endpoint\"] = safe_get(section, \"auth_endpoint\")\n dct[\"tenant_name\"] = safe_get(section, \"tenant_name\")\n dct[\"tenant_id\"] = safe_get(section, \"tenant_id\")\n use_servicenet = safe_get(section, \"use_servicenet\", \"False\")\n dct[\"use_servicenet\"] = use_servicenet == \"True\"\n app_agent = safe_get(section, \"custom_user_agent\")\n if app_agent:\n # Customize the user-agent string with the app name.\n dct[\"user_agent\"] = \"%s %s\" % (app_agent, USER_AGENT)\n else:\n dct[\"user_agent\"] = USER_AGENT\n\n # If this is the first section, make it the default\n if not self._default_set:\n self._settings[\"default\"] = self._settings[section]\n self._default_set = True\n if creds_found:\n warnings.warn(\"Login credentials were detected in your .pyrax.cfg \"\n \"file. 
These have been ignored, but you should remove \"\n \"them and either place them in a credential file, or \"\n \"consider using another means of authentication. More \"\n \"information on the use of credential files can be found \"\n \"in the 'docs/getting_started.md' document.\")", "response": "Reads the specified configuration file and stores the values in the internal dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ndeletes this domain and all of its resource records.", "response": "def delete(self, delete_subdomains=False):\n \"\"\"\n Deletes this domain and all of its resource records. If this domain has\n subdomains, each subdomain will now become a root domain. If you wish to\n also delete any subdomains, pass True to 'delete_subdomains'.\n \"\"\"\n self.manager.delete(self, delete_subdomains=delete_subdomains)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update(self, emailAddress=None, ttl=None, comment=None):\n return self.manager.update_domain(self, emailAddress=emailAddress,\n ttl=ttl, comment=comment)", "response": "Updates the attributes of a domain in the cache."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list_subdomains(self, limit=None, offset=None):\n return self.manager.list_subdomains(self, limit=limit, offset=offset)", "response": "Returns a list of all subdomains for this domain."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning a list of all records configured for this domain.", "response": "def list_records(self, limit=None, offset=None):\n \"\"\"\n Returns a list of all records configured for this domain.\n \"\"\"\n return self.manager.list_records(self, limit=limit, offset=offset)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a list of all records configured for this domain that 
match the supplied search criteria.", "response": "def search_records(self, record_type, name=None, data=None):\n \"\"\"\n Returns a list of all records configured for this domain that match\n the supplied search criteria.\n \"\"\"\n return self.manager.search_records(self, record_type=record_type,\n name=name, data=data)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef find_record(self, record_type, name=None, data=None):\n matches = self.manager.search_records(self, record_type=record_type,\n name=name, data=data)\n if not matches:\n raise exc.DomainRecordNotFound\n elif len(matches) > 1:\n raise exc.DomainRecordNotUnique\n return matches[0]", "response": "Returns a single record for this domain that matches the supplied search criteria."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nupdating an existing record for this domain.", "response": "def update_record(self, record, data=None, priority=None,\n ttl=None, comment=None):\n \"\"\"\n Modifies an existing record for this domain.\n \"\"\"\n return self.manager.update_record(self, record, data=data,\n priority=priority, ttl=ttl, comment=comment)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _create_body(self, name, emailAddress, ttl=3600, comment=None,\n subdomains=None, records=None):\n \"\"\"\n Creates the appropriate dict for creating a new domain.\n \"\"\"\n if subdomains is None:\n subdomains = []\n if records is None:\n records = []\n body = {\"domains\": [{\n \"name\": name,\n \"emailAddress\": emailAddress,\n \"ttl\": ttl,\n \"comment\": comment,\n \"subdomains\": {\n \"domains\": subdomains\n },\n \"recordsList\": {\n \"records\": records\n },\n }]}\n return body", "response": "Creates the appropriate dict for creating a new domain."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef 
_reset_paging(self, service, body=None):\n if service == \"all\":\n for svc in self._paging.keys():\n svc_dct = self._paging[svc]\n svc_dct[\"next_uri\"] = svc_dct[\"prev_uri\"] = None\n svc_dct[\"total_entries\"] = None\n return\n svc_dct = self._paging[service]\n svc_dct[\"next_uri\"] = svc_dct[\"prev_uri\"] = None\n svc_dct[\"total_entries\"] = None\n if not body:\n return\n svc_dct[\"total_entries\"] = body.get(\"totalEntries\")\n links = body.get(\"links\")\n uri_base = self.uri_base\n if links:\n for link in links:\n href = link[\"href\"]\n pos = href.index(uri_base)\n page_uri = href[pos - 1:]\n if link[\"rel\"] == \"next\":\n svc_dct[\"next_uri\"] = page_uri\n elif link[\"rel\"] == \"previous\":\n svc_dct[\"prev_uri\"] = page_uri", "response": "Resets the internal attributes when there is no current paging request."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef list(self, limit=None, offset=None):\n uri = \"/%s%s\" % (self.uri_base, self._get_pagination_qs(limit, offset))\n return self._list(uri)", "response": "Gets a list of all domains or optionally a page of domains."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nhandle the communication with the API when getting a full listing of the resources managed by this class.", "response": "def _list(self, uri, obj_class=None, list_all=False):\n \"\"\"\n Handles the communication with the API when getting\n a full listing of the resources managed by this class.\n \"\"\"\n resp, resp_body = self._retry_get(uri)\n if obj_class is None:\n obj_class = self.resource_class\n\n data = resp_body[self.plural_response_key]\n ret = [obj_class(self, res, loaded=False)\n for res in data if res]\n self._reset_paging(\"domain\", resp_body)\n if list_all:\n dom_paging = self._paging.get(\"domain\", {})\n while dom_paging.get(\"next_uri\"):\n next_uri = dom_paging.get(\"next_uri\")\n ret.extend(self._list(uri=next_uri, 
obj_class=obj_class,\n list_all=False))\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_previous_page(self):\n uri = self._paging.get(\"domain\", {}).get(\"prev_uri\")\n if uri is None:\n raise exc.NoMoreResults(\"There are no previous pages of domains \"\n \"to list.\")\n return self._list(uri)", "response": "Returns the previous page of domain entries in the current domain."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef list_next_page(self):\n uri = self._paging.get(\"domain\", {}).get(\"next_uri\")\n if uri is None:\n raise exc.NoMoreResults(\"There are no more pages of domains to \"\n \"list.\")\n return self._list(uri)", "response": "Returns the next page of domain records."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nhandling the communication with the API when getting a specific resource managed by this class. Because DNS returns a different format for the body, the BaseManager method must be overridden here.", "response": "def _get(self, uri):\n \"\"\"\n Handles the communication with the API when getting\n a specific resource managed by this class.\n\n Because DNS returns a different format for the body,\n the BaseManager method must be overridden here.\n \"\"\"\n uri = \"%s?showRecords=false&showSubdomains=false\" % uri\n resp, body = self._retry_get(uri)\n body[\"records\"] = []\n return self.resource_class(self, body, loaded=True)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _retry_get(self, uri):\n for i in six.moves.range(DEFAULT_RETRY):\n resp, body = self.api.method_get(uri)\n if body:\n return resp, body\n # Tried too many times\n raise exc.ServiceResponseFailure(\"The Cloud DNS service failed to \"\n \"respond to the request.\")", "response": "Attempts to retry on GET requests."} {"SOURCE": "codesearchnet", "instruction": "Write 
a Python 3 function for\nhandling asynchronous call to the DNS API.", "response": "def _async_call(self, uri, body=None, method=\"GET\", error_class=None,\n has_response=True, *args, **kwargs):\n \"\"\"\n Handles asynchronous call/responses for the DNS API.\n\n Returns the response headers and body if the call was successful.\n If an error status is returned, and the 'error_class' parameter is\n specified, that class of error will be raised with the details from\n the response. If no error class is specified, the response headers\n and body will be returned to the calling method, which will have\n to handle the result.\n \"\"\"\n api_methods = {\n \"GET\": self._retry_get,\n \"POST\": self.api.method_post,\n \"PUT\": self.api.method_put,\n \"DELETE\": self.api.method_delete,\n }\n api_method = api_methods[method]\n try:\n if body is None:\n resp, resp_body = api_method(uri, *args, **kwargs)\n else:\n resp, resp_body = api_method(uri, body=body, *args, **kwargs)\n except Exception as e:\n if error_class:\n raise error_class(e)\n else:\n raise\n\n callbackURL = resp_body[\"callbackUrl\"].split(\"/status/\")[-1]\n massagedURL = \"/status/%s?showDetails=true\" % callbackURL\n start = time.time()\n timed_out = False\n while (resp_body[\"status\"] == \"RUNNING\") and not timed_out:\n resp_body = None\n while resp_body is None and not timed_out:\n resp, resp_body = self._retry_get(massagedURL)\n if self._timeout:\n timed_out = ((time.time() - start) > self._timeout)\n time.sleep(self._delay)\n\n if timed_out:\n raise exc.DNSCallTimedOut(\"The API call to '%s' did not complete \"\n \"after %s seconds.\" % (uri, self._timeout))\n if error_class and (resp_body[\"status\"] == \"ERROR\"):\n # This call will handle raising the error.\n self._process_async_error(resp_body, error_class)\n if has_response:\n ret = resp, resp_body[\"response\"]\n else:\n ret = resp, resp_body\n try:\n resp_body = json.loads(resp_body)\n except Exception:\n pass\n return ret"} {"SOURCE": 
"codesearchnet", "instruction": "Can you create a Python 3 function that\nprocesses an async response to get the error message from the DNS API.", "response": "def _process_async_error(self, resp_body, error_class):\n \"\"\"\n The DNS API does not return a consistent format for their error\n messages. This abstracts out the differences in order to present\n a single unified message in the exception to be raised.\n \"\"\"\n def _fmt_error(err):\n # Remove the cumbersome Java-esque message\n details = err.get(\"details\", \"\").replace(\"\\n\", \" \")\n if not details:\n details = err.get(\"message\", \"\")\n return \"%s (%s)\" % (details, err.get(\"code\", \"\"))\n\n error = resp_body.get(\"error\", \"\")\n if \"failedItems\" in error:\n # Multi-error response\n faults = error.get(\"failedItems\", {}).get(\"faults\", [])\n msgs = [_fmt_error(fault) for fault in faults]\n msg = \"\\n\".join(msgs)\n else:\n msg = _fmt_error(error)\n raise error_class(msg)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a new base manager record set.", "response": "def _create(self, uri, body, records=None, subdomains=None,\n return_none=False, return_raw=False, **kwargs):\n \"\"\"\n Handles the communication with the API when creating a new\n resource managed by this class.\n\n Since DNS works completely differently for create() than the other\n APIs, this method overrides the default BaseManager behavior.\n\n If 'records' are supplied, they should be a list of dicts. Each\n record dict should have the following format:\n\n {\"name\": \"example.com\",\n \"type\": \"A\",\n \"data\": \"192.0.2.17\",\n \"ttl\": 86400}\n\n If 'subdomains' are supplied, they should be a list of dicts. 
Each\n subdomain dict should have the following format:\n\n {\"name\": \"sub1.example.com\",\n \"comment\": \"1st sample subdomain\",\n \"emailAddress\": \"sample@rackspace.com\"}\n \"\"\"\n self.run_hooks(\"modify_body_for_create\", body, **kwargs)\n resp, resp_body = self._async_call(uri, body=body, method=\"POST\",\n error_class=exc.DomainCreationFailed)\n response_body = resp_body[self.response_key][0]\n return self.resource_class(self, response_body)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndelete the specified domain and all of its resource records.", "response": "def delete(self, domain, delete_subdomains=False):\n \"\"\"\n Deletes the specified domain and all of its resource records. If the\n domain has subdomains, each subdomain will now become a root domain. If\n you wish to also delete any subdomains, pass True to 'delete_subdomains'.\n \"\"\"\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(domain))\n if delete_subdomains:\n uri = \"%s?deleteSubdomains=true\" % uri\n resp, resp_body = self._async_call(uri, method=\"DELETE\",\n error_class=exc.DomainDeletionFailed, has_response=False)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfinds all items with attributes matching kwargs.", "response": "def findall(self, **kwargs):\n \"\"\"\n Finds all items with attributes matching ``**kwargs``.\n\n Normally this isn't very efficient, since the default action is to\n load the entire list and then filter on the Python side, but the DNS\n API provides a more efficient search option when filtering on name.\n So if the filter is on name, use that; otherwise, use the default.\n \"\"\"\n if (len(kwargs) == 1) and (\"name\" in kwargs):\n # Filtering on name; use the more efficient method.\n nm = kwargs[\"name\"].lower()\n uri = \"/%s?name=%s\" % (self.uri_base, nm)\n matches = self._list(uri, list_all=True)\n return [match for match in matches\n if match.name.lower() == nm]\n 
else:\n return super(CloudDNSManager, self).findall(**kwargs)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nget the changes for a domain since a specified date or datetime.", "response": "def changes_since(self, domain, date_or_datetime):\n \"\"\"\n Gets the changes for a domain since the specified date/datetime.\n The date can be one of:\n - a Python datetime object\n - a Python date object\n - a string in the format 'YYYY-MM-YY HH:MM:SS'\n - a string in the format 'YYYY-MM-YY'\n\n It returns a list of dicts, whose keys depend on the specific change\n that was made. A simple example of such a change dict:\n\n {u'accountId': 000000,\n u'action': u'update',\n u'changeDetails': [{u'field': u'serial_number',\n u'newValue': u'1354038941',\n u'originalValue': u'1354038940'},\n {u'field': u'updated_at',\n u'newValue': u'Tue Nov 27 17:55:41 UTC 2012',\n u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}],\n u'domain': u'example.com',\n u'targetId': 00000000,\n u'targetType': u'Domain'}\n \"\"\"\n domain_id = utils.get_id(domain)\n dt = utils.iso_time_string(date_or_datetime, show_tzinfo=True)\n uri = \"/domains/%s/changes?since=%s\" % (domain_id, dt)\n resp, body = self._retry_get(uri)\n return body.get(\"changes\", [])"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef export_domain(self, domain):\n uri = \"/domains/%s/export\" % utils.get_id(domain)\n resp, resp_body = self._async_call(uri, method=\"GET\",\n error_class=exc.NotFound)\n return resp_body.get(\"contents\", \"\")", "response": "This method returns the contents of the specified domain as a string."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncreate a new domain in the BIND 9 format and returns the new domain.", "response": "def import_domain(self, domain_data):\n \"\"\"\n Takes a string in the BIND 9 format and creates a new domain. 
See the\n 'export_domain()' method for a description of the format.\n \"\"\"\n uri = \"/domains/import\"\n body = {\"domains\": [{\n \"contentType\": \"BIND_9\",\n \"contents\": domain_data,\n }]}\n resp, resp_body = self._async_call(uri, method=\"POST\", body=body,\n error_class=exc.DomainCreationFailed)\n return resp_body"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_domain(self, domain, emailAddress=None, ttl=None, comment=None):\n if not any((emailAddress, ttl, comment)):\n raise exc.MissingDNSSettings(\n \"No settings provided to update_domain().\")\n uri = \"/domains/%s\" % utils.get_id(domain)\n body = {\"comment\": comment,\n \"ttl\": ttl,\n \"emailAddress\": emailAddress,\n }\n none_keys = [key for key, val in body.items()\n if val is None]\n for none_key in none_keys:\n body.pop(none_key)\n resp, resp_body = self._async_call(uri, method=\"PUT\", body=body,\n error_class=exc.DomainUpdateFailed, has_response=False)\n return resp_body", "response": "Updates the attributes of a domain in the specified domain."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a list of all subdomains of the specified domain.", "response": "def list_subdomains(self, domain, limit=None, offset=None):\n \"\"\"\n Returns a list of all subdomains of the specified domain.\n \"\"\"\n # The commented-out uri is the official API, but it is\n # horribly slow.\n# uri = \"/domains/%s/subdomains\" % utils.get_id(domain)\n uri = \"/domains?name=%s\" % domain.name\n page_qs = self._get_pagination_qs(limit, offset)\n if page_qs:\n uri = \"%s&%s\" % (uri, page_qs[1:])\n return self._list_subdomains(uri, domain.id)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list_subdomains_next_page(self):\n uri = self._paging.get(\"subdomain\", {}).get(\"next_uri\")\n if uri is None:\n raise exc.NoMoreResults(\"There 
are no more pages of subdomains \"\n \"to list.\")\n return self._list_subdomains(uri)", "response": "Returns the next page of subdomains for the current session."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a list of all records configured for the specified domain.", "response": "def list_records(self, domain, limit=None, offset=None):\n \"\"\"\n Returns a list of all records configured for the specified domain.\n \"\"\"\n uri = \"/domains/%s/records%s\" % (utils.get_id(domain),\n self._get_pagination_qs(limit, offset))\n return self._list_records(uri)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef list_records_previous_page(self):\n uri = self._paging.get(\"record\", {}).get(\"prev_uri\")\n if uri is None:\n raise exc.NoMoreResults(\"There are no previous pages of records \"\n \"to list.\")\n return self._list_records(uri)", "response": "Returns the previous page of records for the current record ID."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list_records_next_page(self):\n uri = self._paging.get(\"record\", {}).get(\"next_uri\")\n if uri is None:\n raise exc.NoMoreResults(\"There are no more pages of records to list.\")\n return self._list_records(uri)", "response": "Returns the next page of records from the API."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef search_records(self, domain, record_type, name=None, data=None):\n search_params = []\n if name:\n search_params.append(\"name=%s\" % name)\n if data:\n search_params.append(\"data=%s\" % data)\n query_string = \"&\".join(search_params)\n dom_id = utils.get_id(domain)\n uri = \"/domains/%s/records?type=%s\" % (dom_id, record_type)\n if query_string:\n uri = \"%s&%s\" % (uri, query_string)\n resp, body = self._retry_get(uri)\n records = body.get(\"records\", [])\n 
self._reset_paging(\"record\", body)\n rec_paging = self._paging.get(\"record\", {})\n while rec_paging.get(\"next_uri\"):\n resp, body = self._retry_get(rec_paging.get(\"next_uri\"))\n self._reset_paging(\"record\", body)\n records.extend(body.get(\"records\", []))\n for record in records:\n record[\"domain_id\"] = dom_id\n return [CloudDNSRecord(self, record, loaded=False)\n for record in records if record]", "response": "Returns a list of all records configured for the specified domain that match the supplied search criteria."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nadds the records to this domain.", "response": "def add_records(self, domain, records):\n \"\"\"\n Adds the records to this domain. Each record should be a dict with the\n following keys:\n - type (required)\n - name (required)\n - data (required)\n - ttl (optional)\n - comment (optional)\n - priority (required for MX and SRV records; forbidden otherwise)\n \"\"\"\n if isinstance(records, dict):\n # Single record passed\n records = [records]\n dom_id = utils.get_id(domain)\n uri = \"/domains/%s/records\" % dom_id\n body = {\"records\": records}\n resp, resp_body = self._async_call(uri, method=\"POST\", body=body,\n error_class=exc.DomainRecordAdditionFailed, has_response=False)\n records = resp_body.get(\"response\", {}).get(\"records\", [])\n for record in records:\n record[\"domain_id\"] = dom_id\n return [CloudDNSRecord(self, record, loaded=False)\n for record in records if record]"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngetting the full information for an existing record for this domain.", "response": "def get_record(self, domain, record):\n \"\"\"\n Gets the full information for an existing record for this domain.\n \"\"\"\n rec_id = utils.get_id(record)\n domain_id = utils.get_id(domain)\n uri = \"/domains/%s/records/%s\" % (domain_id, rec_id)\n resp, resp_body = self._retry_get(uri)\n 
resp_body[\"domain_id\"] = domain_id\n return CloudDNSRecord(self, resp_body, loaded=False)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nupdates an existing record for a domain.", "response": "def update_record(self, domain, record, data=None, priority=None,\n ttl=None, comment=None):\n \"\"\"\n Modifies an existing record for a domain.\n \"\"\"\n rdict = {\"id\": record.id,\n \"name\": record.name,\n }\n pdict = {\"data\": data,\n \"priority\": priority,\n \"ttl\": ttl,\n \"comment\": comment,\n }\n utils.params_to_dict(pdict, rdict)\n return self.update_records(domain, [rdict])"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef update_records(self, domain, records):\n if not isinstance(records, list):\n raise TypeError(\"Expected records of type list\")\n uri = \"/domains/%s/records\" % utils.get_id(domain)\n resp, resp_body = self._async_call(uri, method=\"PUT\",\n body={\"records\": records},\n error_class=exc.DomainRecordUpdateFailed, has_response=False)\n return resp_body", "response": "Updates an existing record for a domain."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete_record(self, domain, record):\n uri = \"/domains/%s/records/%s\" % (utils.get_id(domain),\n utils.get_id(record))\n resp, resp_body = self._async_call(uri, method=\"DELETE\",\n error_class=exc.DomainRecordDeletionFailed, has_response=False)\n return resp_body", "response": "Deletes an existing record for a domain."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the HREF link and service name for use with PTR record management.", "response": "def _get_ptr_details(self, device, device_type):\n \"\"\"\n Takes a device and device type and returns the corresponding HREF link\n and service name for use with PTR record management.\n \"\"\"\n context = self.api.identity\n region = 
self.api.region_name\n if device_type.lower().startswith(\"load\"):\n ep = pyrax._get_service_endpoint(context, \"load_balancer\", region)\n svc = \"loadbalancers\"\n svc_name = \"cloudLoadBalancers\"\n else:\n ep = pyrax._get_service_endpoint(context, \"compute\", region)\n svc = \"servers\"\n svc_name = \"cloudServersOpenStack\"\n href = \"%s/%s/%s\" % (ep, svc, utils.get_id(device))\n return (href, svc_name)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ngiving a device determines if it is a CloudServer a CloudLoadBalancer or an invalid device.", "response": "def _resolve_device_type(self, device):\n \"\"\"\n Given a device, determines if it is a CloudServer, a CloudLoadBalancer,\n or an invalid device.\n \"\"\"\n try:\n from tests.unit import fakes\n server_types = (pyrax.CloudServer, fakes.FakeServer)\n lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer,\n fakes.FakeDNSDevice)\n except ImportError:\n # Not running with tests\n server_types = (pyrax.CloudServer, )\n lb_types = (CloudLoadBalancer, )\n if isinstance(device, server_types):\n device_type = \"server\"\n elif isinstance(device, lb_types):\n device_type = \"loadbalancer\"\n else:\n raise exc.InvalidDeviceType(\"The device '%s' must be a CloudServer \"\n \"or a CloudLoadBalancer.\" % device)\n return device_type"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef list_ptr_records(self, device):\n device_type = self._resolve_device_type(device)\n href, svc_name = self._get_ptr_details(device, device_type)\n uri = \"/rdns/%s?href=%s\" % (svc_name, href)\n try:\n resp, resp_body = self._retry_get(uri)\n except exc.NotFound:\n return []\n records = [CloudDNSPTRRecord(rec, device)\n for rec in resp_body.get(\"records\", [])]\n return records", "response": "Returns a list of all PTR records configured for this device."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nadding 
one or more PTR records to the specified device.", "response": "def add_ptr_records(self, device, records):\n \"\"\"\n Adds one or more PTR records to the specified device.\n \"\"\"\n device_type = self._resolve_device_type(device)\n href, svc_name = self._get_ptr_details(device, device_type)\n if not isinstance(records, (list, tuple)):\n records = [records]\n body = {\"recordsList\": {\n \"records\": records},\n \"link\": {\n \"content\": \"\",\n \"href\": href,\n \"rel\": svc_name,\n }}\n uri = \"/rdns\"\n # This is a necessary hack, so here's why: if you attempt to add\n # PTR records to device, and you don't have rights to either the device\n # or the IP address, the DNS API will return a 401 - Unauthorized.\n # Unfortunately, the pyrax client interprets this as a bad auth token,\n # and there is no way to distinguish this from an actual authentication\n # failure. The client will attempt to re-authenticate as a result, and\n # will fail, due to the DNS API not having regional endpoints. 
The net\n # result is that an EndpointNotFound exception will be raised, which\n # we catch here and then raise a more meaningful exception.\n # The Rackspace DNS team is working on changing this to return a 403\n # instead; when that happens this kludge can go away.\n try:\n resp, resp_body = self._async_call(uri, body=body, method=\"POST\",\n error_class=exc.PTRRecordCreationFailed)\n except exc.EndpointNotFound:\n raise exc.InvalidPTRRecord(\"The domain/IP address information is not \"\n \"valid for this device.\")\n return resp_body.get(\"records\")\n records = [CloudDNSPTRRecord(rec, device)\n for rec in resp_body.get(\"records\", [])]\n return records"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nupdating a PTR record with the supplied values.", "response": "def update_ptr_record(self, device, record, domain_name, data=None,\n ttl=None, comment=None):\n \"\"\"\n Updates a PTR record with the supplied values.\n \"\"\"\n device_type = self._resolve_device_type(device)\n href, svc_name = self._get_ptr_details(device, device_type)\n try:\n rec_id = record.id\n except AttributeError:\n rec_id = record\n rec = {\"name\": domain_name,\n \"id\": rec_id,\n \"type\": \"PTR\",\n \"data\": data,\n }\n if ttl is not None:\n # Minimum TTL is 300 seconds\n rec[\"ttl\"] = max(300, ttl)\n if comment is not None:\n # Maximum comment length is 160 chars\n rec[\"comment\"] = comment[:160]\n body = {\"recordsList\": {\n \"records\": [rec]},\n \"link\": {\n \"content\": \"\",\n \"href\": href,\n \"rel\": svc_name,\n }}\n uri = \"/rdns\"\n try:\n resp, resp_body = self._async_call(uri, body=body, method=\"PUT\",\n has_response=False, error_class=exc.PTRRecordUpdateFailed)\n except exc.EndpointNotFound as e:\n raise exc.InvalidPTRRecord(\"The record domain/IP address \"\n \"information is not valid for this device.\")\n return resp_body.get(\"status\") == \"COMPLETED\""} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 
3 code\ndef delete_ptr_records(self, device, ip_address=None):\n device_type = self._resolve_device_type(device)\n href, svc_name = self._get_ptr_details(device, device_type)\n uri = \"/rdns/%s?href=%s\" % (svc_name, href)\n if ip_address:\n uri = \"%s&ip=%s\" % (uri, ip_address)\n resp, resp_body = self._async_call(uri, method=\"DELETE\",\n has_response=False,\n error_class=exc.PTRRecordDeletionFailed)\n return resp_body.get(\"status\") == \"COMPLETED\"", "response": "Deletes the PTR records for the specified device."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a manager to handle the instances and flavors.", "response": "def _configure_manager(self):\n \"\"\"\n Creates a manager to handle the instances, and another\n to handle flavors.\n \"\"\"\n self._manager = CloudDNSManager(self, resource_class=CloudDNSDomain,\n response_key=\"domains\", plural_response_key=\"domains\",\n uri_base=\"domains\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef method_get(self, uri, **kwargs):\n for i in six.moves.range(3):\n resp, body = super(CloudDNSClient, self).method_get(uri, **kwargs)\n if body:\n return resp, body\n raise exc.ServiceResponseFailure(\"The Cloud DNS service failed to \"\n \"respond to the request.\")", "response": "Overload the method_get function to retry on empty body"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a list of all resources.", "response": "def list(self, limit=None, offset=None):\n \"\"\"Returns a list of all resources.\"\"\"\n return self._manager.list(limit=limit, offset=offset)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nupdate the attributes of a domain record.", "response": "def update_domain(self, domain, emailAddress=None, ttl=None, comment=None):\n \"\"\"\n Provides a way to modify the following attributes of a domain\n 
record:\n - email address\n - ttl setting\n - comment\n \"\"\"\n return domain.update(emailAddress=emailAddress,\n ttl=ttl, comment=comment)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef list_subdomains(self, domain, limit=None, offset=None):\n return domain.list_subdomains(limit=limit, offset=offset)", "response": "Returns a list of all subdomains for the specified domain."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn an iterator that will return each available subdomain for the specified domain.", "response": "def get_subdomain_iterator(self, domain, limit=None, offset=None):\n \"\"\"\n Returns an iterator that will return each available subdomain for the\n specified domain. If there are more than the limit of 100 subdomains,\n the iterator will continue to fetch subdomains from the API until all\n subdomains have been returned.\n \"\"\"\n return SubdomainResultsIterator(self._manager, domain=domain)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef list_records(self, domain, limit=None, offset=None):\n return domain.list_records(limit=limit, offset=offset)", "response": "Returns a list of all records configured for the specified domain."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef search_records(self, domain, record_type, name=None, data=None):\n return domain.search_records(record_type=record_type,\n name=name, data=data)", "response": "Returns a list of all records configured for the specified domain."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef find_record(self, domain, record_type, name=None, data=None):\n return domain.find_record(record_type=record_type,\n name=name, data=data)", "response": "Returns a single record for this domain that matches the 
supplied search criteria."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update_record(self, domain, record, data=None, priority=None, ttl=None,\n comment=None):\n \"\"\"\n Modifies an existing record for a domain.\n \"\"\"\n return domain.update_record(record, data=data, priority=priority,\n ttl=ttl, comment=comment)", "response": "Updates an existing record in a domain."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nupdates a PTR record with the supplied data and TTL.", "response": "def update_ptr_record(self, device, record, domain_name, data=None,\n ttl=None, comment=None):\n \"\"\"\n Updates a PTR record with the supplied values.\n \"\"\"\n return self._manager.update_ptr_record(device, record, domain_name,\n data=data, ttl=ttl, comment=comment)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ndelete the PTR records for the specified device.", "response": "def delete_ptr_records(self, device, ip_address=None):\n \"\"\"\n Deletes the PTR records for the specified device. 
If 'ip_address'\n is supplied, only the PTR records with that IP address will be deleted.\n \"\"\"\n return self._manager.delete_ptr_records(device, ip_address=ip_address)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a dict with the absolute limits for the current account.", "response": "def get_absolute_limits(self):\n \"\"\"\n Returns a dict with the absolute limits for the current account.\n \"\"\"\n resp, body = self.method_get(\"/limits\")\n absolute_limits = body.get(\"limits\", {}).get(\"absolute\")\n return absolute_limits"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_rate_limits(self):\n resp, body = self.method_get(\"/limits\")\n rate_limits = body.get(\"limits\", {}).get(\"rate\")\n ret = []\n for rate_limit in rate_limits:\n limits = rate_limit[\"limit\"]\n uri_limits = {\"uri\": rate_limit[\"uri\"],\n \"limits\": limits}\n ret.append(uri_limits)\n return ret", "response": "Returns a list of the current rate limits for domain\n and status requests."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef next(self):\n try:\n return self.results.pop(0)\n except IndexError:\n if self.next_uri is None:\n raise StopIteration()\n else:\n if not self.next_uri:\n if self.domain:\n self.results = self.list_method(self.domain)\n else:\n self.results = self.list_method()\n else:\n args = self.extra_args\n self.results = self._list_method(self.next_uri, *args)\n self.next_uri = self.manager._paging.get(\n self.paging_service, {}).get(\"next_uri\")\n # We should have more results.\n try:\n return self.results.pop(0)\n except IndexError:\n raise StopIteration()", "response": "Return the next available item from the API."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreading the Rackspace credential file and sets the attributes.", "response": "def 
_read_credential_file(self, cfg):\n \"\"\"\n Parses the credential file with Rackspace-specific labels.\n \"\"\"\n self.username = cfg.get(\"rackspace_cloud\", \"username\")\n try:\n self.password = cfg.get(\"rackspace_cloud\", \"api_key\", raw=True)\n except ConfigParser.NoOptionError as e:\n # Allow either the use of either 'api_key' or 'password'.\n self.password = cfg.get(\"rackspace_cloud\", \"password\", raw=True)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _format_credentials(self):\n if self._creds_style == \"apikey\":\n return {\"auth\": {\"RAX-KSKEY:apiKeyCredentials\":\n {\"username\": \"%s\" % self.username,\n \"apiKey\": \"%s\" % self.api_key}}}\n else:\n # Return in the default password-style\n return super(RaxIdentity, self)._format_credentials()", "response": "Returns the current credentials in the format expected by the Rackspace authentication service."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef set_credentials(self, username, password=None, region=None,\n tenant_id=None, authenticate=False):\n \"\"\"\n Sets the username and password directly. Because Rackspace auth uses\n the api_key, make sure that any old values are cleared.\n \"\"\"\n self.api_key = None\n super(RaxIdentity, self).set_credentials(username, password=password,\n region=region, tenant_id=tenant_id, authenticate=authenticate)", "response": "Sets the username and password directly."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef authenticate(self, username=None, password=None, api_key=None,\n tenant_id=None, connect=False):\n \"\"\"\n If the user's credentials include an API key, the default behavior will\n work. 
But if they are using a password, the initial attempt will fail,\n so try again, but this time using the standard password format.\n\n The 'connect' parameter is retained for backwards compatibility. It no\n longer has any effect.\n \"\"\"\n try:\n super(RaxIdentity, self).authenticate(username=username,\n password=password, api_key=api_key, tenant_id=tenant_id)\n except exc.AuthenticationFailed:\n self._creds_style = \"password\"\n super(RaxIdentity, self).authenticate(username=username,\n password=password, api_key=api_key, tenant_id=tenant_id)", "response": "Authenticate the user with the specified credentials."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef auth_with_token(self, token, tenant_id=None, tenant_name=None):\n # Implementation note:\n # Rackspace auth uses one tenant ID for the object_store services and\n # another for everything else. The one that the user would know is the\n # 'everything else' ID, so we need to extract the object_store tenant\n # ID from the initial response, and call the superclass\n # auth_with_token() method a second time with that tenant ID to get the\n # object_store endpoints. 
We can then add these to the initial\n # endpoints returned by the primary tenant ID, and then continue with\n # the auth process.\n main_resp, main_body = self._call_token_auth(token, tenant_id,\n tenant_name)\n # Get the swift tenant ID\n roles = main_body[\"access\"][\"user\"][\"roles\"]\n ostore = [role for role in roles\n if role[\"name\"] == \"object-store:default\"]\n if ostore:\n ostore_tenant_id = ostore[0][\"tenantId\"]\n ostore_resp, ostore_body = self._call_token_auth(token,\n ostore_tenant_id, None)\n ostore_cat = ostore_body[\"access\"][\"serviceCatalog\"]\n main_cat = main_body[\"access\"][\"serviceCatalog\"]\n main_cat.extend(ostore_cat)\n self._parse_response(main_body)\n self.authenticated = True", "response": "Authenticates with a token."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _parse_response(self, resp):\n super(RaxIdentity, self)._parse_response(resp)\n user = resp[\"access\"][\"user\"]\n defreg = user.get(\"RAX-AUTH:defaultRegion\")\n if defreg:\n self._default_region = defreg", "response": "Gets the authentication information from the returned JSON."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_client(self, service, region, public=True, cached=True):\n client_class = None\n # Cloud Networks currently uses nova-networks, so it doesn't appear as\n # a separate entry in the service catalog. This hack will allow context\n # objects to continue to work with Rackspace Cloud Networks. 
When the\n # Neutron service is implemented, this hack will have to be removed.\n if service in (\"compute:networks\", \"networks\", \"network\",\n \"cloudnetworks\", \"cloud_networks\"):\n service = \"compute\"\n client_class = CloudNetworkClient\n return super(RaxIdentity, self).get_client(service, region,\n public=public, cached=cached, client_class=client_class)", "response": "Returns the client object for the given service and region."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a User object for the specified user.", "response": "def get_user(self, user_id=None, username=None, email=None):\n \"\"\"\n Returns the user specified by either ID, username or email.\n\n Since more than user can have the same email address, searching by that\n term will return a list of 1 or more User objects. Searching by\n username or ID will return a single User.\n\n If a user_id that doesn't belong to the current account is searched\n for, a Forbidden exception is raised. 
When searching by username or\n email, a NotFound exception is raised if there is no matching user.\n \"\"\"\n if user_id:\n uri = \"/users/%s\" % user_id\n elif username:\n uri = \"/users?name=%s\" % username\n elif email:\n uri = \"/users?email=%s\" % email\n else:\n raise ValueError(\"You must include one of 'user_id', \"\n \"'username', or 'email' when calling get_user().\")\n resp, resp_body = self.method_get(uri)\n if resp.status_code == 404:\n raise exc.NotFound(\"No such user exists.\")\n users = resp_body.get(\"users\", [])\n if users:\n return [User(self, user) for user in users]\n else:\n user = resp_body.get(\"user\", {})\n if user:\n return User(self, user)\n else:\n raise exc.NotFound(\"No such user exists.\")"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nallowing you to update settings for a given user.", "response": "def update_user(self, user, email=None, username=None,\n uid=None, defaultRegion=None, enabled=None):\n \"\"\"\n Allows you to update settings for a given user.\n \"\"\"\n user_id = utils.get_id(user)\n uri = \"users/%s\" % user_id\n upd = {\"id\": user_id}\n if email is not None:\n upd[\"email\"] = email\n if defaultRegion is not None:\n upd[\"RAX-AUTH:defaultRegion\"] = defaultRegion\n if username is not None:\n upd[\"username\"] = username\n if enabled is not None:\n upd[\"enabled\"] = enabled\n data = {\"user\": upd}\n resp, resp_body = self.method_put(uri, data=data)\n if resp.status_code in (401, 403, 404):\n raise exc.AuthorizationFailure(\"You are not authorized to update \"\n \"users.\")\n return User(self, resp_body)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nresetting the API key for the specified user or returns the currently - authenticated API key.", "response": "def reset_api_key(self, user=None):\n \"\"\"\n Resets the API key for the specified user, or if no user is specified,\n for the current user. 
Returns the newly-created API key.\n\n Resetting an API key does not invalidate any authenticated sessions,\n nor does it revoke any tokens.\n \"\"\"\n if user is None:\n user_id = utils.get_id(self)\n else:\n user_id = utils.get_id(user)\n uri = \"users/%s/OS-KSADM/credentials/\" % user_id\n uri += \"RAX-KSKEY:apiKeyCredentials/RAX-AUTH/reset\"\n resp, resp_body = self.method_post(uri)\n return resp_body.get(\"RAX-KSKEY:apiKeyCredentials\", {}).get(\"apiKey\")"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _make_policies(self):\n self.policies = [AutoScalePolicy(self.manager, dct, self)\n for dct in self.scalingPolicies]", "response": "Convert the scalingPolicies dictionary into AutoScalePolicy objects."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nupdate this ScalingGroup. One or more of the attributes can be specified. NOTE: if you specify metadata, it will *replace* any existing metadata. If you want to add to it, you either need to pass the complete dict of metadata, or call the update_metadata() method.", "response": "def update(self, name=None, cooldown=None, min_entities=None,\n max_entities=None, metadata=None):\n \"\"\"\n Updates this ScalingGroup. 
One or more of the attributes can be\n specified.\n\n NOTE: if you specify metadata, it will *replace* any existing metadata.\n If you want to add to it, you either need to pass the complete dict of\n metadata, or call the update_metadata() method.\n \"\"\"\n return self.manager.update(self, name=name,\n cooldown=cooldown, min_entities=min_entities,\n max_entities=max_entities, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nadds a new policy to this scaling group.", "response": "def add_policy(self, name, policy_type, cooldown, change=None,\n is_percent=False, desired_capacity=None, args=None):\n \"\"\"\n Adds a policy with the given values to this scaling group. The\n 'change' parameter is treated as an absolute amount, unless\n 'is_percent' is True, in which case it is treated as a percentage.\n \"\"\"\n return self.manager.add_policy(self, name, policy_type, cooldown,\n change=change, is_percent=is_percent,\n desired_capacity=desired_capacity, args=args)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef execute_policy(self, policy):\n return self.manager.execute_policy(scaling_group=self, policy=policy)", "response": "Executes the specified policy for this scaling group."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef delete_policy(self, policy):\n return self.manager.delete_policy(scaling_group=self, policy=policy)", "response": "Deletes the specified policy from this scaling group."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nadd a webhook to the specified policy.", "response": "def add_webhook(self, policy, name, metadata=None):\n \"\"\"\n Adds a webhook to the specified policy.\n \"\"\"\n return self.manager.add_webhook(self, policy, name, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 
function that\nupdates the metadata dict for the specified webhook.", "response": "def update_webhook_metadata(self, policy, webhook, metadata):\n \"\"\"\n Adds the given metadata dict to the existing metadata for the specified\n webhook.\n \"\"\"\n return self.manager.update_webhook_metadata(self, policy, webhook,\n metadata)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ndeletes the specified webhook from the specified policy.", "response": "def delete_webhook(self, policy, webhook):\n \"\"\"\n Deletes the specified webhook from the specified policy.\n \"\"\"\n return self.manager.delete_webhook(self, policy, webhook)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the current state of the specified scaling group as a dictionary.", "response": "def get_state(self, scaling_group):\n \"\"\"\n Returns the current state of the specified scaling group as a\n dictionary.\n \"\"\"\n uri = \"/%s/%s/state\" % (self.uri_base, utils.get_id(scaling_group))\n resp, resp_body = self.api.method_get(uri)\n data = resp_body[\"group\"]\n ret = {}\n ret[\"active\"] = [itm[\"id\"] for itm in data[\"active\"]]\n ret[\"active_capacity\"] = data[\"activeCapacity\"]\n ret[\"desired_capacity\"] = data[\"desiredCapacity\"]\n ret[\"pending_capacity\"] = data[\"pendingCapacity\"]\n ret[\"paused\"] = data[\"paused\"]\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\npause all execution of the policies for the specified scaling group.", "response": "def pause(self, scaling_group):\n \"\"\"\n Pauses all execution of the policies for the specified scaling group.\n \"\"\"\n uri = \"/%s/%s/pause\" % (self.uri_base, utils.get_id(scaling_group))\n resp, resp_body = self.api.method_post(uri)\n return None"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_configuration(self, scaling_group):\n uri = 
\"/%s/%s/config\" % (self.uri_base, utils.get_id(scaling_group))\n resp, resp_body = self.api.method_get(uri)\n return resp_body.get(\"groupConfiguration\")", "response": "Returns the configuration of the specified scaling group."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef replace(self, scaling_group, name, cooldown, min_entities,\n max_entities, metadata=None):\n \"\"\"\n Replace an existing ScalingGroup configuration. All of the attributes\n must be specified If you wish to delete any of the optional attributes,\n pass them in as None.\n \"\"\"\n body = self._create_group_config_body(name, cooldown, min_entities,\n max_entities, metadata=metadata)\n group_id = utils.get_id(scaling_group)\n uri = \"/%s/%s/config\" % (self.uri_base, group_id)\n resp, resp_body = self.api.method_put(uri, body=body)", "response": "Replaces an existing ScalingGroup configuration."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nupdating the attributes of a specific resource in a Scaling Group.", "response": "def update(self, scaling_group, name=None, cooldown=None,\n min_entities=None, max_entities=None, metadata=None):\n \"\"\"\n Updates an existing ScalingGroup. 
One or more of the attributes can\n be specified.\n\n NOTE: if you specify metadata, it will *replace* any existing metadata.\n If you want to add to it, you either need to pass the complete dict of\n metadata, or call the update_metadata() method.\n \"\"\"\n if not isinstance(scaling_group, ScalingGroup):\n scaling_group = self.get(scaling_group)\n uri = \"/%s/%s/config\" % (self.uri_base, scaling_group.id)\n if cooldown is None:\n cooldown = scaling_group.cooldown\n if min_entities is None:\n min_entities = scaling_group.min_entities\n if max_entities is None:\n max_entities = scaling_group.max_entities\n body = {\"name\": name or scaling_group.name,\n \"cooldown\": cooldown,\n \"minEntities\": min_entities,\n \"maxEntities\": max_entities,\n \"metadata\": metadata or scaling_group.metadata,\n }\n resp, resp_body = self.api.method_put(uri, body=body)\n return None"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_metadata(self, scaling_group, metadata):\n if not isinstance(scaling_group, ScalingGroup):\n scaling_group = self.get(scaling_group)\n curr_meta = scaling_group.metadata\n curr_meta.update(metadata)\n return self.update(scaling_group, metadata=curr_meta)", "response": "Updates the metadata dict for the given scaling group."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_launch_config(self, scaling_group):\n key_map = {\n \"OS-DCF:diskConfig\": \"disk_config\",\n \"flavorRef\": \"flavor\",\n \"imageRef\": \"image\",\n }\n uri = \"/%s/%s/launch\" % (self.uri_base, utils.get_id(scaling_group))\n resp, resp_body = self.api.method_get(uri)\n ret = {}\n data = resp_body.get(\"launchConfiguration\")\n ret[\"type\"] = data.get(\"type\")\n args = data.get(\"args\", {})\n ret[\"load_balancers\"] = args.get(\"loadBalancers\")\n for key, value in args.get(\"server\", {}).items():\n norm_key = key_map.get(key, key)\n 
ret[norm_key] = value\n return ret", "response": "Returns the launch configuration for the specified scaling group."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreplace an existing launch configuration in the specified scaling group.", "response": "def replace_launch_config(self, scaling_group, launch_config_type,\n server_name, image, flavor, disk_config=None, metadata=None,\n personality=None, networks=None, load_balancers=None,\n key_name=None, config_drive=False, user_data=None):\n \"\"\"\n Replace an existing launch configuration. All of the attributes must be\n specified. If you wish to delete any of the optional attributes, pass\n them in as None.\n \"\"\"\n group_id = utils.get_id(scaling_group)\n uri = \"/%s/%s/launch\" % (self.uri_base, group_id)\n body = self._create_launch_config_body(\n launch_config_type=launch_config_type, server_name=server_name,\n image=image, flavor=flavor, disk_config=disk_config,\n metadata=metadata, personality=personality, networks=networks,\n load_balancers=load_balancers, key_name=key_name,\n config_drive=config_drive, user_data=user_data)\n resp, resp_body = self.api.method_put(uri, body=body)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update_launch_config(self, scaling_group, server_name=None, image=None,\n flavor=None, disk_config=None, metadata=None, personality=None,\n networks=None, load_balancers=None, key_name=None, config_drive=False,\n user_data=None):\n \"\"\"\n Updates the server launch configuration for an existing scaling group.\n One or more of the available attributes can be specified.\n\n NOTE: if you specify metadata, it will *replace* any existing metadata.\n If you want to add to it, you either need to pass the complete dict of\n metadata, or call the update_launch_metadata() method.\n \"\"\"\n if not isinstance(scaling_group, ScalingGroup):\n scaling_group = self.get(scaling_group)\n uri = \"/%s/%s/launch\" % 
(self.uri_base, scaling_group.id)\n largs = scaling_group.launchConfiguration.get(\"args\", {})\n srv_args = largs.get(\"server\", {})\n lb_args = largs.get(\"loadBalancers\", {})\n flav = flavor or srv_args.get(\"flavorRef\")\n dconf = disk_config or srv_args.get(\"OS-DCF:diskConfig\", \"AUTO\")\n if personality is None:\n personality = srv_args.get(\"personality\", [])\n cfg_drv = config_drive or srv_args.get(\"config_drive\")\n if user_data:\n user_data = base64.b64encode(user_data)\n usr_data = user_data or srv_args.get(\"user_data\")\n update_metadata = metadata or srv_args.get(\"metadata\")\n body = {\"type\": \"launch_server\",\n \"args\": {\n \"server\": {\n \"name\": server_name or srv_args.get(\"name\"),\n \"imageRef\": image or srv_args.get(\"imageRef\"),\n \"flavorRef\": flav,\n \"OS-DCF:diskConfig\": dconf,\n \"networks\": networks or srv_args.get(\"networks\"),\n },\n \"loadBalancers\": load_balancers or lb_args,\n },\n }\n bas = body[\"args\"][\"server\"]\n if cfg_drv:\n bas[\"config_drive\"] = cfg_drv\n if usr_data:\n bas[\"user_data\"] = usr_data\n if personality:\n bas[\"personality\"] = self._encode_personality(personality)\n if update_metadata:\n bas[\"metadata\"] = update_metadata\n key_name = key_name or srv_args.get(\"key_name\")\n if key_name:\n bas[\"key_name\"] = key_name\n resp, resp_body = self.api.method_put(uri, body=body)\n return None", "response": "Updates the launch configuration for an existing server."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nupdates the launch metadata for the given scaling group.", "response": "def update_launch_metadata(self, scaling_group, metadata):\n \"\"\"\n Adds the given metadata dict to the existing metadata for the scaling\n group's launch configuration.\n \"\"\"\n if not isinstance(scaling_group, ScalingGroup):\n scaling_group = self.get(scaling_group)\n curr_meta = scaling_group.launchConfiguration.get(\"args\", {}).get(\n \"server\", 
{}).get(\"metadata\", {})\n curr_meta.update(metadata)\n return self.update_launch_config(scaling_group, metadata=curr_meta)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef add_policy(self, scaling_group, name, policy_type, cooldown,\n change=None, is_percent=False, desired_capacity=None, args=None):\n \"\"\"\n Adds a policy with the given values to the specified scaling group. The\n 'change' parameter is treated as an absolute amount, unless\n 'is_percent' is True, in which case it is treated as a percentage.\n \"\"\"\n uri = \"/%s/%s/policies\" % (self.uri_base, utils.get_id(scaling_group))\n body = self._create_policy_body(name, policy_type, cooldown,\n change=change, is_percent=is_percent,\n desired_capacity=desired_capacity, args=args)\n # \"body\" needs to be a list\n body = [body]\n resp, resp_body = self.api.method_post(uri, body=body)\n pol_info = resp_body.get(\"policies\")[0]\n return AutoScalePolicy(self, pol_info, scaling_group)", "response": "Adds a new policy to the specified scaling group."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_policies(self, scaling_group):\n uri = \"/%s/%s/policies\" % (self.uri_base, utils.get_id(scaling_group))\n resp, resp_body = self.api.method_get(uri)\n return [AutoScalePolicy(self, data, scaling_group)\n for data in resp_body.get(\"policies\", [])]", "response": "Returns a list of all policies defined for the specified scaling group."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreplacing an existing policy in the specified scaling group.", "response": "def replace_policy(self, scaling_group, policy, name,\n policy_type, cooldown, change=None, is_percent=False,\n desired_capacity=None, args=None):\n \"\"\"\n Replace an existing policy. All of the attributes must be specified. 
If\n you wish to delete any of the optional attributes, pass them in as\n None.\n \"\"\"\n policy_id = utils.get_id(policy)\n group_id = utils.get_id(scaling_group)\n uri = \"/%s/%s/policies/%s\" % (self.uri_base, group_id, policy_id)\n body = self._create_policy_body(name=name, policy_type=policy_type,\n cooldown=cooldown, change=change, is_percent=is_percent,\n desired_capacity=desired_capacity, args=args)\n resp, resp_body = self.api.method_put(uri, body=body)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_policy(self, scaling_group, policy, name=None, policy_type=None,\n cooldown=None, change=None, is_percent=False,\n desired_capacity=None, args=None):\n \"\"\"\n Updates the specified policy. One or more of the parameters may be\n specified.\n \"\"\"\n uri = \"/%s/%s/policies/%s\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy))\n if not isinstance(policy, AutoScalePolicy):\n # Received an ID\n policy = self.get_policy(scaling_group, policy)\n body = {\"name\": name or policy.name,\n \"type\": policy_type or policy.type,\n \"cooldown\": cooldown or policy.cooldown,\n }\n if desired_capacity is not None:\n body[\"desiredCapacity\"] = desired_capacity\n elif change is not None:\n if is_percent:\n body[\"changePercent\"] = change\n else:\n body[\"change\"] = change\n else:\n if getattr(policy, \"changePercent\", None) is not None:\n body[\"changePercent\"] = policy.changePercent\n elif getattr(policy, \"change\", None) is not None:\n body[\"change\"] = policy.change\n elif getattr(policy, \"desiredCapacity\", None) is not None:\n body[\"desiredCapacity\"] = policy.desiredCapacity\n args = args or getattr(policy, \"args\", None)\n if args is not None:\n body[\"args\"] = args\n resp, resp_body = self.api.method_put(uri, body=body)\n return None", "response": "Updates the specified policy. 
One or more of the parameters may be be\n specified."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef execute_policy(self, scaling_group, policy):\n uri = \"/%s/%s/policies/%s/execute\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy))\n resp, resp_body = self.api.method_post(uri)\n return None", "response": "Executes the specified policy for this scaling group."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes the specified policy from the specified scaling group.", "response": "def delete_policy(self, scaling_group, policy):\n \"\"\"\n Deletes the specified policy from the scaling group.\n \"\"\"\n uri = \"/%s/%s/policies/%s\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy))\n resp, resp_body = self.api.method_delete(uri)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nadds a webhook to the specified policy.", "response": "def add_webhook(self, scaling_group, policy, name, metadata=None):\n \"\"\"\n Adds a webhook to the specified policy.\n \"\"\"\n uri = \"/%s/%s/policies/%s/webhooks\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy))\n body = self._create_webhook_body(name, metadata=metadata)\n # \"body\" needs to be a list\n body = [body]\n resp, resp_body = self.api.method_post(uri, body=body)\n data = resp_body.get(\"webhooks\")[0]\n return AutoScaleWebhook(self, data, policy, scaling_group)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a list of all webhooks for the specified policy.", "response": "def list_webhooks(self, scaling_group, policy):\n \"\"\"\n Returns a list of all webhooks for the specified policy.\n \"\"\"\n uri = \"/%s/%s/policies/%s/webhooks\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy))\n resp, resp_body = self.api.method_get(uri)\n return 
[AutoScaleWebhook(self, data, policy, scaling_group)\n for data in resp_body.get(\"webhooks\", [])]"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef replace_webhook(self, scaling_group, policy, webhook, name,\n metadata=None):\n \"\"\"\n Replace an existing webhook. All of the attributes must be specified.\n If you wish to delete any of the optional attributes, pass them in as\n None.\n \"\"\"\n uri = \"/%s/%s/policies/%s/webhooks/%s\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy),\n utils.get_id(webhook))\n group_id = utils.get_id(scaling_group)\n policy_id = utils.get_id(policy)\n webhook_id = utils.get_id(webhook)\n body = self._create_webhook_body(name, metadata=metadata)\n resp, resp_body = self.api.method_put(uri, body=body)", "response": "Replaces an existing webhook with a new one."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nupdates the specified webhook.", "response": "def update_webhook(self, scaling_group, policy, webhook, name=None,\n metadata=None):\n \"\"\"\n Updates the specified webhook. 
One or more of the parameters may be\n specified.\n \"\"\"\n uri = \"/%s/%s/policies/%s/webhooks/%s\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy),\n utils.get_id(webhook))\n if not isinstance(webhook, AutoScaleWebhook):\n # Received an ID\n webhook = self.get_webhook(scaling_group, policy, webhook)\n body = {\"name\": name or webhook.name,\n \"metadata\": metadata or webhook.metadata,\n }\n resp, resp_body = self.api.method_put(uri, body=body)\n webhook.reload()\n return webhook"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_webhook_metadata(self, scaling_group, policy, webhook, metadata):\n if not isinstance(webhook, AutoScaleWebhook):\n webhook = self.get_webhook(scaling_group, policy, webhook)\n curr_meta = webhook.metadata or {}\n curr_meta.update(metadata)\n return self.update_webhook(scaling_group, policy, webhook,\n metadata=curr_meta)", "response": "Updates the metadata dict for the specified webhook."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef delete_webhook(self, scaling_group, policy, webhook):\n uri = \"/%s/%s/policies/%s/webhooks/%s\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy),\n utils.get_id(webhook))\n resp, resp_body = self.api.method_delete(uri)\n return None", "response": "Deletes the specified webhook from the specified policy."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _resolve_lbs(load_balancers):\n lb_args = []\n if not isinstance(load_balancers, list):\n lbs = [load_balancers]\n else:\n lbs = load_balancers\n for lb in lbs:\n if isinstance(lb, dict):\n lb_args.append(lb)\n elif isinstance(lb, CloudLoadBalancer):\n lb_args.append({\n \"loadBalancerId\": lb.id,\n \"port\": lb.port,\n })\n elif isinstance(lb, tuple):\n lb_args.append({\"loadBalancerId\": lb[0],\n \"port\": 
lb[1]})\n else:\n # See if it's an ID for a Load Balancer\n try:\n instance = pyrax.cloud_loadbalancers.get(lb)\n except Exception:\n raise exc.InvalidLoadBalancer(\"Received an invalid \"\n \"specification for a Load Balancer: '%s'\" % lb)\n lb_args.append({\n \"loadBalancerId\": instance.id,\n \"port\": instance.port,\n })\n return lb_args", "response": "Resolves the LBs and returns the arguments required for creating a Scaling Group."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _encode_personality(self, personality):\n if personality is None:\n personality = []\n else:\n personality = utils.coerce_to_list(personality)\n for pfile in personality:\n if \"contents\" in pfile:\n pfile[\"contents\"] = base64.b64encode(pfile[\"contents\"])\n return personality", "response": "Encode the personality files to base64 before transmitting."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _create_body(self, name, cooldown, min_entities, max_entities,\n launch_config_type, server_name, image, flavor, disk_config=None,\n metadata=None, personality=None, networks=None,\n load_balancers=None, scaling_policies=None, group_metadata=None,\n key_name=None, config_drive=False, user_data=None):\n \"\"\"\n Used to create the dict required to create any of the following:\n A Scaling Group\n \"\"\"\n if metadata is None:\n metadata = {}\n if scaling_policies is None:\n scaling_policies = []\n group_config = self._create_group_config_body(name, cooldown,\n min_entities, max_entities, metadata=group_metadata)\n launch_config = self._create_launch_config_body(launch_config_type,\n server_name, image, flavor, disk_config=disk_config,\n metadata=metadata, personality=personality, networks=networks,\n load_balancers=load_balancers, key_name=key_name,\n config_drive=config_drive, user_data=user_data)\n body = {\n \"groupConfiguration\": group_config,\n \"launchConfiguration\": 
launch_config,\n \"scalingPolicies\": scaling_policies,\n }\n return body", "response": "Create the dict required to create a new object for the object."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nadds a webhook to this policy.", "response": "def add_webhook(self, name, metadata=None):\n \"\"\"\n Adds a webhook to this policy.\n \"\"\"\n return self.manager.add_webhook(self.scaling_group, self, name,\n metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_webhook(self, webhook):\n return self.manager.get_webhook(self.scaling_group, self, webhook)", "response": "Gets the detail for the specified webhook."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_webhook(self, webhook, name=None, metadata=None):\n return self.manager.update_webhook(self.scaling_group, policy=self,\n webhook=webhook, name=name, metadata=metadata)", "response": "Updates the specified webhook."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_webhook_metadata(self, webhook, metadata):\n return self.manager.update_webhook_metadata(self.scaling_group, self,\n webhook, metadata)", "response": "Updates the metadata dict for the specified webhook."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ndeletes the specified webhook from this policy.", "response": "def delete_webhook(self, webhook):\n \"\"\"\n Deletes the specified webhook from this policy.\n \"\"\"\n return self.manager.delete_webhook(self.scaling_group, self, webhook)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate this webhook. One or more of the parameters may be specified.", "response": "def update(self, name=None, metadata=None):\n \"\"\"\n Updates this webhook. 
One or more of the parameters may be specified.\n \"\"\"\n return self.policy.update_webhook(self, name=name, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _configure_manager(self):\n self._manager = ScalingGroupManager(self,\n resource_class=ScalingGroup, response_key=\"group\",\n uri_base=\"groups\")", "response": "Creates a manager to handle autoscale operations."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreplaces an existing ScalingGroup configuration.", "response": "def replace(self, scaling_group, name, cooldown, min_entities,\n max_entities, metadata=None):\n \"\"\"\n Replace an existing ScalingGroup configuration. All of the attributes\n must be specified. If you wish to delete any of the optional\n attributes, pass them in as None.\n \"\"\"\n return self._manager.replace(scaling_group, name, cooldown,\n min_entities, max_entities, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nupdating an existing ScalingGroup.", "response": "def update(self, scaling_group, name=None, cooldown=None, min_entities=None,\n max_entities=None, metadata=None):\n \"\"\"\n Updates an existing ScalingGroup. 
One or more of the attributes can be\n specified.\n\n NOTE: if you specify metadata, it will *replace* any existing metadata.\n If you want to add to it, you either need to pass the complete dict of\n metadata, or call the update_metadata() method.\n \"\"\"\n return self._manager.update(scaling_group, name=name, cooldown=cooldown,\n min_entities=min_entities, max_entities=max_entities,\n metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef replace_launch_config(self, scaling_group, launch_config_type,\n server_name, image, flavor, disk_config=None, metadata=None,\n personality=None, networks=None, load_balancers=None,\n key_name=None):\n \"\"\"\n Replace an existing launch configuration. All of the attributes must be\n specified. If you wish to delete any of the optional attributes, pass\n them in as None.\n \"\"\"\n return self._manager.replace_launch_config(scaling_group,\n launch_config_type, server_name, image, flavor,\n disk_config=disk_config, metadata=metadata,\n personality=personality, networks=networks,\n load_balancers=load_balancers, key_name=key_name)", "response": "Replaces an existing launch configuration."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nupdates the server launch configuration for an existing scaling group.", "response": "def update_launch_config(self, scaling_group, server_name=None, image=None,\n flavor=None, disk_config=None, metadata=None, personality=None,\n networks=None, load_balancers=None, key_name=None, config_drive=False,\n user_data=None):\n \"\"\"\n Updates the server launch configuration for an existing scaling group.\n One or more of the available attributes can be specified.\n\n NOTE: if you specify metadata, it will *replace* any existing metadata.\n If you want to add to it, you either need to pass the complete dict of\n metadata, or call the update_launch_metadata() method.\n \"\"\"\n return 
self._manager.update_launch_config(scaling_group,\n server_name=server_name, image=image, flavor=flavor,\n disk_config=disk_config, metadata=metadata,\n personality=personality, networks=networks,\n load_balancers=load_balancers, key_name=key_name,\n config_drive=config_drive, user_data=user_data)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef add_policy(self, scaling_group, name, policy_type, cooldown,\n change=None, is_percent=False, desired_capacity=None, args=None):\n \"\"\"\n Adds a policy with the given values to the specified scaling group. The\n 'change' parameter is treated as an absolute amount, unless\n 'is_percent' is True, in which case it is treated as a percentage.\n \"\"\"\n return self._manager.add_policy(scaling_group, name, policy_type,\n cooldown, change=change, is_percent=is_percent,\n desired_capacity=desired_capacity, args=args)", "response": "Adds a new policy with the given values to the specified scaling group."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nupdate the specified policy.", "response": "def update_policy(self, scaling_group, policy, name=None, policy_type=None,\n cooldown=None, change=None, is_percent=False,\n desired_capacity=None, args=None):\n \"\"\"\n Updates the specified policy. 
One or more of the parameters may be\n specified.\n \"\"\"\n return self._manager.update_policy(scaling_group, policy, name=name,\n policy_type=policy_type, cooldown=cooldown, change=change,\n is_percent=is_percent, desired_capacity=desired_capacity,\n args=args)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nexecute the specified policy for the specified scaling group.", "response": "def execute_policy(self, scaling_group, policy):\n \"\"\"\n Executes the specified policy for the scaling group.\n \"\"\"\n return self._manager.execute_policy(scaling_group=scaling_group,\n policy=policy)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes the specified policy from the specified scaling group.", "response": "def delete_policy(self, scaling_group, policy):\n \"\"\"\n Deletes the specified policy from the scaling group.\n \"\"\"\n return self._manager.delete_policy(scaling_group=scaling_group,\n policy=policy)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nadds a webhook to the specified policy.", "response": "def add_webhook(self, scaling_group, policy, name, metadata=None):\n \"\"\"\n Adds a webhook to the specified policy.\n \"\"\"\n return self._manager.add_webhook(scaling_group, policy, name,\n metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting the details for the specified webhook.", "response": "def get_webhook(self, scaling_group, policy, webhook):\n \"\"\"\n Gets the detail for the specified webhook.\n \"\"\"\n return self._manager.get_webhook(scaling_group, policy, webhook)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef replace_webhook(self, scaling_group, policy, webhook, name,\n metadata=None):\n \"\"\"\n Replace an existing webhook. 
All of the attributes must be specified.\n If you wish to delete any of the optional attributes, pass them in as\n None.\n \"\"\"\n return self._manager.replace_webhook(scaling_group, policy, webhook,\n name, metadata=metadata)", "response": "Replace an existing webhook."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nupdates the specified webhook.", "response": "def update_webhook(self, scaling_group, policy, webhook, name=None,\n metadata=None):\n \"\"\"\n Updates the specified webhook. One or more of the parameters may be\n specified.\n \"\"\"\n return self._manager.update_webhook(scaling_group=scaling_group,\n policy=policy, webhook=webhook, name=name, metadata=metadata)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef update_webhook_metadata(self, scaling_group, policy, webhook, metadata):\n return self._manager.update_webhook_metadata(scaling_group, policy,\n webhook, metadata)", "response": "Updates the metadata dict for the specified webhook."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ndelete the specified webhook from the specified policy.", "response": "def delete_webhook(self, scaling_group, policy, webhook):\n \"\"\"\n Deletes the specified webhook from the policy.\n \"\"\"\n return self._manager.delete_webhook(scaling_group, policy, webhook)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a human - readable ID for the current object.", "response": "def human_id(self):\n \"\"\"Subclasses may override this to provide a pretty ID which can be used\n for bash completion.\n \"\"\"\n if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:\n return utils.to_slug(getattr(self, self.NAME_ATTR))\n return None"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nadd details to the object.", "response": "def _add_details(self, info):\n \"\"\"\n Takes the dict returned 
by the API call and sets the\n corresponding attributes on the object.\n \"\"\"\n for (key, val) in six.iteritems(info):\n if isinstance(key, six.text_type) and six.PY2:\n key = key.encode(pyrax.get_encoding())\n elif isinstance(key, bytes):\n key = key.decode(\"utf-8\")\n setattr(self, key, val)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets the details for the object.", "response": "def get(self):\n \"\"\"Gets the details for the object.\"\"\"\n # set 'loaded' first ... so if we have to bail, we know we tried.\n self.loaded = True\n if not hasattr(self.manager, \"get\"):\n return\n if not self.get_details:\n return\n new = self.manager.get(self)\n if new:\n self._add_details(new._info)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndeleting the object from the manager.", "response": "def delete(self):\n \"\"\"Deletes the object.\"\"\"\n # set 'loaded' first ... so if we have to bail, we know we tried.\n self.loaded = True\n if not hasattr(self.manager, \"delete\"):\n return\n self.manager.delete(self)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a list of resource objects.", "response": "def list(self, limit=None, marker=None):\n \"\"\"\n Returns a list of resource objects. 
Pagination is supported through the\n optional 'marker' and 'limit' parameters.\n \"\"\"\n return self._manager.list(limit=limit, marker=marker)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nmaking a request to the API.", "response": "def request(self, uri, method, *args, **kwargs):\n \"\"\"\n Formats the request into a dict representing the headers\n and body that will be used to make the API call.\n \"\"\"\n if self.timeout:\n kwargs[\"timeout\"] = self.timeout\n kwargs[\"verify\"] = self.verify_ssl\n kwargs.setdefault(\"headers\", kwargs.get(\"headers\", {}))\n kwargs[\"headers\"][\"User-Agent\"] = self.user_agent\n kwargs[\"headers\"][\"Accept\"] = \"application/json\"\n if (\"body\" in kwargs) or (\"data\" in kwargs):\n if \"Content-Type\" not in kwargs[\"headers\"]:\n kwargs[\"headers\"][\"Content-Type\"] = \"application/json\"\n elif kwargs[\"headers\"][\"Content-Type\"] is None:\n del kwargs[\"headers\"][\"Content-Type\"]\n # Allow subclasses to add their own headers\n self._add_custom_headers(kwargs[\"headers\"])\n resp, body = pyrax.http.request(method, uri, *args, **kwargs)\n if resp.status_code >= 400:\n raise exc.from_response(resp, body)\n return resp, body"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nwraps the request and records the elapsed time.", "response": "def _time_request(self, uri, method, **kwargs):\n \"\"\"Wraps the request call and records the elapsed time.\"\"\"\n start_time = time.time()\n resp, body = self.request(uri, method, **kwargs)\n self.times.append((\"%s %s\" % (method, uri),\n start_time, time.time()))\n return resp, body"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _api_request(self, uri, method, **kwargs):\n id_svc = self.identity\n if not all((self.management_url, id_svc.token, id_svc.tenant_id)):\n id_svc.authenticate()\n\n if not self.management_url:\n # We've authenticated but 
no management_url has been set. This\n # indicates that the service is not available.\n raise exc.ServiceNotAvailable(\"The '%s' service is not available.\"\n % self)\n if uri.startswith(\"http\"):\n parsed = list(urllib.parse.urlparse(uri))\n for pos, item in enumerate(parsed):\n if pos < 2:\n # Don't escape the scheme or netloc\n continue\n parsed[pos] = _safe_quote(parsed[pos])\n safe_uri = urllib.parse.urlunparse(parsed)\n else:\n safe_uri = \"%s%s\" % (self.management_url, _safe_quote(uri))\n # Perform the request once. If we get a 401 back then it\n # might be because the auth token expired, so try to\n # re-authenticate and try again. If it still fails, bail.\n try:\n kwargs.setdefault(\"headers\", {})[\"X-Auth-Token\"] = id_svc.token\n if id_svc.tenant_id:\n kwargs[\"headers\"][\"X-Auth-Project-Id\"] = id_svc.tenant_id\n resp, body = self._time_request(safe_uri, method, **kwargs)\n return resp, body\n except exc.Unauthorized as ex:\n try:\n id_svc.authenticate()\n kwargs[\"headers\"][\"X-Auth-Token\"] = id_svc.token\n resp, body = self._time_request(safe_uri, method, **kwargs)\n return resp, body\n except exc.Unauthorized:\n raise ex", "response": "Perform the API request."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a list of resource objects.", "response": "def list(self, limit=None, marker=None, return_raw=False, other_keys=None):\n \"\"\"\n Returns a list of resource objects. Pagination is supported through the\n optional 'marker' and 'limit' parameters.\n\n Some APIs do not follow the typical pattern in their responses, and the\n BaseManager subclasses will have to parse the raw response to get the\n desired information. For those cases, pass 'return_raw=True', and the\n response and response_body will be returned unprocessed.\n\n Another use case is when additional information is returned in the\n response body. 
To have that returned, use the 'other_keys' parameter.\n This can be either a single string or a list of strings that correspond\n to keys in the response body. If specified, a 2-tuple is returned, with\n the first element being the list of resources, and the second a dict\n whose keys are the 'other_keys' items, and whose values are the\n corresponding values in the response body, or None if no such key is\n present.\n \"\"\"\n uri = \"/%s\" % self.uri_base\n pagination_items = []\n if limit is not None:\n pagination_items.append(\"limit=%s\" % limit)\n if marker is not None:\n pagination_items.append(\"marker=%s\" % marker)\n pagination = \"&\".join(pagination_items)\n if pagination:\n uri = \"%s?%s\" % (uri, pagination)\n return self._list(uri, return_raw=return_raw, other_keys=other_keys)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef head(self, item):\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(item))\n return self._head(uri)", "response": "Makes a HEAD request on a specific item."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get(self, item):\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(item))\n return self._get(uri)", "response": "Gets a specific item."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating a new entry in the cache.", "response": "def create(self, name, *args, **kwargs):\n \"\"\"\n Subclasses need to implement the _create_body() method to return a dict\n that will be used for the API request body.\n\n For cases where no response is returned from the API on creation, pass\n `return_none=True` so that the _create method doesn't expect one.\n\n For cases where you do not want the _create method to attempt to parse\n the response, but instead have it returned directly, pass\n `return_raw=True`.\n\n For cases where the API returns information in the response and not the\n response_body, pass 
`return_response=True`.\n \"\"\"\n return_none = kwargs.pop(\"return_none\", False)\n return_raw = kwargs.pop(\"return_raw\", False)\n return_response = kwargs.pop(\"return_response\", False)\n body = self._create_body(name, *args, **kwargs)\n return self._create(\"/%s\" % self.uri_base, body,\n return_none=return_none, return_raw=return_raw,\n return_response=return_response)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes the specified item.", "response": "def delete(self, item):\n \"\"\"Deletes the specified item.\"\"\"\n uri = \"/%s/%s\" % (self.uri_base, utils.get_id(item))\n return self._delete(uri)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nhandling the communication with the API when getting a full listing of the resources managed by this class.", "response": "def _list(self, uri, obj_class=None, body=None, return_raw=False,\n other_keys=None):\n \"\"\"\n Handles the communication with the API when getting\n a full listing of the resources managed by this class.\n \"\"\"\n if body:\n resp, resp_body = self.api.method_post(uri, body=body)\n else:\n resp, resp_body = self.api.method_get(uri)\n if return_raw:\n return (resp, resp_body)\n if obj_class is None:\n obj_class = self.resource_class\n\n data = self._data_from_response(resp_body)\n ret = [obj_class(self, res, loaded=False) for res in data if res]\n if other_keys:\n keys = utils.coerce_to_list(other_keys)\n other = [self._data_from_response(resp_body, key) for key in keys]\n return (ret, other)\n else:\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _data_from_response(self, resp_body, key=None):\n if key:\n data = resp_body.get(key)\n else:\n data = resp_body.get(self.plural_response_key, resp_body)\n # NOTE(ja): some services, such as keystone returns values as list as\n # {\"values\": [ ... 
]} unlike other services which just return the\n # list.\n if isinstance(data, dict):\n try:\n data = data[\"values\"]\n except KeyError:\n pass\n return data", "response": "This method extracts the data from the response body."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _head(self, uri):\n resp, resp_body = self.api.method_head(uri)\n return resp", "response": "Handles the communication with the API when performing a HEAD request. Returns the response body."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nhandling the communication with the API when getting a specific resource managed by this class.", "response": "def _get(self, uri):\n \"\"\"\n Handles the communication with the API when getting\n a specific resource managed by this class.\n \"\"\"\n resp, resp_body = self.api.method_get(uri)\n return self.resource_class(self, resp_body, self.response_key,\n loaded=True)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _create(self, uri, body, return_none=False, return_raw=False,\n return_response=None, **kwargs):\n \"\"\"\n Handles the communication with the API when creating a new\n resource managed by this class.\n \"\"\"\n self.run_hooks(\"modify_body_for_create\", body, **kwargs)\n resp, resp_body = self.api.method_post(uri, body=body)\n if return_none:\n # No response body\n return\n elif return_response:\n return resp\n elif return_raw:\n if self.response_key:\n return resp_body[self.response_key]\n else:\n return resp_body\n return self.resource_class(self, resp_body, self.response_key)", "response": "Create a new object of the class"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _update(self, uri, body, **kwargs):\n self.run_hooks(\"modify_body_for_update\", body, **kwargs)\n resp, resp_body = self.api.method_put(uri, 
body=body)\n return resp_body", "response": "Handles the communication with the API when updating a specific resource in the cluster store."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef action(self, item, action_type, body={}):\n uri = \"/%s/%s/action\" % (self.uri_base, utils.get_id(item))\n action_body = {action_type: body}\n return self.api.method_post(uri, body=action_body)", "response": "This method is used to perform action on a specific item."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef find(self, **kwargs):\n matches = self.findall(**kwargs)\n num_matches = len(matches)\n if not num_matches:\n msg = \"No %s matching: %s.\" % (self.resource_class.__name__, kwargs)\n raise exc.NotFound(404, msg)\n if num_matches > 1:\n msg = \"More than one %s matching: %s.\" % (\n self.resource_class.__name__, kwargs)\n raise exc.NoUniqueMatch(400, msg)\n else:\n return matches[0]", "response": "Find a single item with attributes matching kwargs."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the Endpoint for a given region.", "response": "def _ep_for_region(self, region):\n \"\"\"\n Given a region, returns the Endpoint for that region, or the Endpoint\n for the ALL region if no match is found. 
If no match is found, None\n is returned, and it is up to the calling method to handle it\n appropriately.\n \"\"\"\n rgn = region.upper()\n try:\n rgn_ep = [ep for ep in list(self.endpoints.values())\n if ep.region.upper() == rgn][0]\n except IndexError:\n # See if there is an 'ALL' region.\n try:\n rgn_ep = [ep for ep in list(self.endpoints.values())\n if ep.region.upper() == \"ALL\"][0]\n except IndexError:\n rgn_ep = None\n return rgn_ep"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning an instance of the appropriate client class for the given region.", "response": "def get_client(self, region):\n \"\"\"\n Returns an instance of the appropriate client class for the given\n region. If there is no endpoint for that region, a NoEndpointForRegion\n exception is raised.\n \"\"\"\n ep = self._ep_for_region(region)\n if not ep:\n raise exc.NoEndpointForRegion(\"There is no endpoint defined for the \"\n \"region '%s' for the '%s' service.\" % (region,\n self.service_type))\n return ep.client"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a new client for this endpoint.", "response": "def get_new_client(self, public=True):\n \"\"\"\n Returns a new instance of the client for this endpoint.\n \"\"\"\n return self._get_client(public=public, cached=False)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get(self, url_type):\n lowtype = url_type.lower()\n if lowtype == \"public\":\n return self.public_url\n elif lowtype == \"private\":\n return self.private_url\n else:\n raise ValueError(\"Valid values are 'public' or 'private'; \"\n \"received '%s'.\" % url_type)", "response": "Returns the value of the\nMimeType corresponding to the given url_type."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncreate a client instance for the service.", "response": "def _create_client(self, clt_class, url, public=True, 
special=False):\n \"\"\"\n Creates a client instance for the service.\n \"\"\"\n if self.service == \"compute\" and not special:\n # Novaclient requires different parameters.\n client = pyrax.connect_to_cloudservers(region=self.region,\n context=self.identity, verify_ssl=self.verify_ssl)\n client.identity = self.identity\n else:\n client = clt_class(self.identity, region_name=self.region,\n management_url=url, verify_ssl=self.verify_ssl)\n return client"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the client object for the given service and region.", "response": "def get_client(self, service, region, public=True, cached=True,\n client_class=None):\n \"\"\"\n Returns the client object for the specified service and region.\n\n By default the public endpoint is used. If you wish to work with a\n services internal endpoints, specify `public=False`.\n\n By default, if a client has already been created for the given service,\n region, and public values, that will be returned. 
To force a new client\n to be created, pass 'cached=False'.\n \"\"\"\n if not self.authenticated:\n raise exc.NotAuthenticated(\"You must authenticate before trying \"\n \"to create clients.\")\n clt = ep = None\n mapped_service = self.service_mapping.get(service) or service\n svc = self.services.get(mapped_service)\n if svc:\n ep = svc.endpoints.get(region)\n if ep:\n clt = ep._get_client(public=public, cached=cached,\n client_class=client_class)\n if not clt:\n raise exc.NoSuchClient(\"There is no client available for the \"\n \"service '%s' in the region '%s'.\" % (service, region))\n return clt"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef set_credentials(self, username, password=None, region=None,\n tenant_id=None, authenticate=False):\n \"\"\"Sets the username and password directly.\"\"\"\n self.username = username\n self.password = password\n self.tenant_id = tenant_id\n if region:\n self.region = region\n if authenticate:\n self.authenticate()", "response": "Sets the username and password directly."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreading in the credentials from the supplied file and sets the credentials for the current instance of the class.", "response": "def set_credential_file(self, credential_file, region=None,\n tenant_id=None, authenticate=False):\n \"\"\"\n Reads in the credentials from the supplied file. 
It should be\n a standard config file in the format:\n\n [keystone]\n username = myusername\n password = top_secret\n tenant_id = my_id\n\n \"\"\"\n self._creds_file = credential_file\n cfg = ConfigParser.SafeConfigParser()\n try:\n if not cfg.read(credential_file):\n # If the specified file does not exist, the parser returns an\n # empty list.\n raise exc.FileNotFound(\"The specified credential file '%s' \"\n \"does not exist\" % credential_file)\n except ConfigParser.MissingSectionHeaderError as e:\n # The file exists, but doesn't have the correct format.\n raise exc.InvalidCredentialFile(e)\n try:\n self._read_credential_file(cfg)\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:\n raise exc.InvalidCredentialFile(e)\n if region:\n self.region = region\n if authenticate:\n self.authenticate()"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef auth_with_token(self, token, tenant_id=None, tenant_name=None):\n resp, resp_body = self._call_token_auth(token, tenant_id, tenant_name)\n self._parse_response(resp_body)\n self.authenticated = True", "response": "Authenticates with a token"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _read_credential_file(self, cfg):\n self.username = cfg.get(\"keystone\", \"username\")\n self.password = cfg.get(\"keystone\", \"password\", raw=True)\n self.tenant_id = cfg.get(\"keystone\", \"tenant_id\")", "response": "Reads the credential file and sets the instance variables."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the current credentials in the format expected by the authentication service.", "response": "def _format_credentials(self):\n \"\"\"\n Returns the current credentials in the format expected by\n the authentication service.\n \"\"\"\n tenant_name = self.tenant_name or self.username\n tenant_id = self.tenant_id or self.username\n return 
{\"auth\": {\"passwordCredentials\":\n {\"username\": tenant_name,\n \"password\": self.password,\n },\n \"tenantId\": tenant_id}}"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nhandling all common functionality required for API calls. Returns a response object.", "response": "def _call(self, mthd, uri, admin, data, headers, std_headers):\n \"\"\"\n Handles all the common functionality required for API calls. Returns\n the resulting response object.\n \"\"\"\n if not uri.startswith(\"http\"):\n uri = \"/\".join((self.auth_endpoint.rstrip(\"/\"), uri))\n if admin:\n # Admin calls use a different port\n uri = re.sub(r\":\\d+/\", \":35357/\", uri)\n if std_headers:\n hdrs = self._standard_headers()\n else:\n hdrs = {}\n if headers:\n hdrs.update(headers)\n kwargs = {\"headers\": hdrs}\n if data:\n kwargs[\"body\"] = data\n if \"tokens\" in uri:\n # We'll handle the exception here\n kwargs[\"raise_exception\"] = False\n return pyrax.http.request(mthd, uri, verify=self.verify_ssl, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef authenticate(self, username=None, password=None, api_key=None,\n tenant_id=None, connect=False):\n \"\"\"\n Using the supplied credentials, connects to the specified\n authentication endpoint and attempts to log in.\n\n Credentials can either be passed directly to this method, or\n previously-stored credentials can be used. If authentication is\n successful, the token and service catalog information is stored, and\n clients for each service and region are created.\n\n The 'connect' parameter is retained for backwards compatibility. 
It no\n longer has any effect.\n \"\"\"\n self.username = username or self.username or pyrax.get_setting(\n \"username\")\n # Different identity systems may pass these under inconsistent names.\n self.password = password or self.password or api_key or self.api_key\n self.api_key = api_key or self.api_key or self.password\n self.tenant_id = tenant_id or self.tenant_id or pyrax.get_setting(\n \"tenant_id\")\n creds = self._format_credentials()\n headers = {\"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n }\n resp, resp_body = self.method_post(\"tokens\", data=creds,\n headers=headers, std_headers=False)\n\n if resp.status_code == 401:\n # Invalid authorization\n raise exc.AuthenticationFailed(\"Incorrect/unauthorized \"\n \"credentials received\")\n elif 500 <= resp.status_code < 600:\n # Internal Server Error\n try:\n error_msg = resp_body[list(resp_body.keys())[0]][\"message\"]\n except (KeyError, AttributeError):\n error_msg = \"Service Currently Unavailable\"\n raise exc.InternalServerError(error_msg)\n elif resp.status_code > 299:\n try:\n msg = resp_body[list(resp_body.keys())[0]][\"message\"]\n except (KeyError, AttributeError):\n msg = None\n if msg:\n err = \"%s - %s.\" % (resp.reason, msg)\n else:\n err = \"%s.\" % resp.reason\n raise exc.AuthenticationFailed(err)\n self._parse_response(resp_body)\n self.authenticated = True", "response": "Authenticate to the specified user and store the token and service catalog information."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ngetting the authentication information from the returned JSON.", "response": "def _parse_response(self, resp):\n \"\"\"Gets the authentication information from the returned JSON.\"\"\"\n access = resp[\"access\"]\n token = access.get(\"token\")\n self.token = token[\"id\"]\n self.tenant_id = token[\"tenant\"][\"id\"]\n self.tenant_name = token[\"tenant\"][\"name\"]\n self.expires = self._parse_api_time(token[\"expires\"])\n 
self.service_catalog = access.get(\"serviceCatalog\")\n self._parse_service_catalog()\n user = access[\"user\"]\n self.user = {}\n self.user[\"id\"] = user[\"id\"]\n self.username = self.user[\"name\"] = user[\"name\"]\n self.user[\"roles\"] = user[\"roles\"]"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef keyring_auth(self, username=None):\n if not keyring:\n # Module not installed\n raise exc.KeyringModuleNotInstalled(\"The 'keyring' Python module \"\n \"is not installed on this system.\")\n if username is None:\n username = pyrax.get_setting(\"keyring_username\")\n if not username:\n raise exc.KeyringUsernameMissing(\"No username specified for \"\n \"keyring authentication.\")\n password = keyring.get_password(\"pyrax\", username)\n if password is None:\n raise exc.KeyringPasswordNotFound(\"No password was found for the \"\n \"username '%s'.\" % username)\n style = self._creds_style or self._default_creds_style\n # Keyring username may be different than the credentials. 
Use the\n # existing username, if present; otherwise, use the supplied username.\n username = self.username or username\n if style == \"apikey\":\n return self.authenticate(username=username, api_key=password)\n else:\n return self.authenticate(username=username, password=password)", "response": "Authenticate the user with the keyring."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nclearing out any credentials tokens and service catalog info.", "response": "def unauthenticate(self):\n \"\"\"\n Clears out any credentials, tokens, and service catalog info.\n \"\"\"\n self.username = \"\"\n self.password = \"\"\n self.tenant_id = \"\"\n self.tenant_name = \"\"\n self.token = \"\"\n self.expires = None\n self.region = \"\"\n self._creds_file = None\n self.api_key = \"\"\n self.services = utils.DotDict()\n self.regions = utils.DotDict()\n self.authenticated = False"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_token(self, force=False):\n self.authenticated = self._has_valid_token()\n if force or not self.authenticated:\n self.authenticate()\n return self.token", "response": "Returns the auth token if it is valid. If not calls the auth endpoint\n method to get a new token."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nchecks if the token is valid and has not expired.", "response": "def _has_valid_token(self):\n \"\"\"\n This only checks the token's existence and expiration. 
If it has been\n invalidated on the server, this method may indicate that the token is\n valid when it might actually not be.\n \"\"\"\n return bool(self.token and (self.expires > datetime.datetime.now()))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_tokens(self):\n resp, resp_body = self.method_get(\"tokens/%s\" % self.token, admin=True)\n if resp.status_code in (401, 403):\n raise exc.AuthorizationFailure(\"You must be an admin to make this \"\n \"call.\")\n return resp_body.get(\"access\")", "response": "Returns a dict containing the list of tokens endpoints user info and role metadata."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef check_token(self, token=None):\n if token is None:\n token = self.token\n resp, resp_body = self.method_head(\"tokens/%s\" % token, admin=True)\n if resp.status_code in (401, 403):\n raise exc.AuthorizationFailure(\"You must be an admin to make this \"\n \"call.\")\n return 200 <= resp.status_code < 300", "response": "Checks if the current token is valid. Returns True or False depending on whether the current token is valid."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nrevokes the specified token from the database. Returns True or False depending on whether deletion of the token was successful. Returns False if deletion of the token was unsuccessful. Returns True or False depending on whether deletion of the token was successful.", "response": "def revoke_token(self, token):\n \"\"\"\n ADMIN ONLY. 
Returns True or False, depending on whether deletion of the\n specified token was successful.\n \"\"\"\n resp, resp_body = self.method_delete(\"tokens/%s\" % token, admin=True)\n if resp.status_code in (401, 403):\n raise exc.AuthorizationFailure(\"You must be an admin to make this \"\n \"call.\")\n return 200 <= resp.status_code < 300"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef list_users(self):\n resp, resp_body = self.method_get(\"users\", admin=True)\n if resp.status_code in (401, 403, 404):\n raise exc.AuthorizationFailure(\"You are not authorized to list \"\n \"users.\")\n # The API is inconsistent; if only one user exists, it does not return\n # a list.\n if \"users\" in resp_body:\n users = resp_body[\"users\"]\n else:\n users = resp_body\n # The returned values may contain password data. Strip that out.\n for user in users:\n bad_keys = [key for key in list(user.keys())\n if \"password\" in key.lower()]\n for bad_key in bad_keys:\n user.pop(bad_key)\n return [User(self, user) for user in users]", "response": "Returns a list of users for the tenant."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncreates a new user in the tenant.", "response": "def create_user(self, name, email, password=None, enabled=True):\n \"\"\"\n ADMIN ONLY. Creates a new user for this tenant (account). The username\n and email address must be supplied. 
You may optionally supply the\n password for this user; if not, the API server generates a password and\n return it in the 'password' attribute of the resulting User object.\n NOTE: this is the ONLY time the password is returned; after the initial\n user creation, there is NO WAY to retrieve the user's password.\n\n You may also specify that the user should be created but not active by\n passing False to the enabled parameter.\n \"\"\"\n # NOTE: the OpenStack docs say that the name key in the following dict\n # is supposed to be 'username', but the service actually expects 'name'.\n data = {\"user\": {\n \"username\": name,\n \"email\": email,\n \"enabled\": enabled,\n }}\n if password:\n data[\"user\"][\"OS-KSADM:password\"] = password\n resp, resp_body = self.method_post(\"users\", data=data, admin=True)\n if resp.status_code == 201:\n return User(self, resp_body.get(\"user\", resp_body))\n elif resp.status_code in (401, 403, 404):\n raise exc.AuthorizationFailure(\"You are not authorized to create \"\n \"users.\")\n elif resp.status_code == 409:\n raise exc.DuplicateUser(\"User '%s' already exists.\" % name)\n elif resp.status_code == 400:\n message = resp_body[\"badRequest\"][\"message\"]\n if \"Expecting valid email address\" in message:\n raise exc.InvalidEmail(\"%s is not valid\" % email)\n else:\n raise exc.BadRequest(message)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nremove the user from the system.", "response": "def delete_user(self, user):\n \"\"\"\n ADMIN ONLY. Removes the user from the system. 
There is no 'undo'\n available, so you should be certain that the user specified is the user\n you wish to delete.\n \"\"\"\n user_id = utils.get_id(user)\n uri = \"users/%s\" % user_id\n resp, resp_body = self.method_delete(uri)\n if resp.status_code == 404:\n raise exc.UserNotFound(\"User '%s' does not exist.\" % user)\n elif resp.status_code in (401, 403):\n raise exc.AuthorizationFailure(\"You are not authorized to delete \"\n \"users.\")"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a list of roles for the specified user.", "response": "def list_roles_for_user(self, user):\n \"\"\"\n ADMIN ONLY. Returns a list of roles for the specified user. Each role\n will be a 3-tuple, consisting of (role_id, role_name,\n role_description).\n \"\"\"\n user_id = utils.get_id(user)\n uri = \"users/%s/roles\" % user_id\n resp, resp_body = self.method_get(uri)\n if resp.status_code in (401, 403):\n raise exc.AuthorizationFailure(\"You are not authorized to list \"\n \"user roles.\")\n roles = resp_body.get(\"roles\")\n return roles"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a user s non - password credentials.", "response": "def list_credentials(self, user=None):\n \"\"\"\n Returns a user's non-password credentials. 
If no user is specified, the\n credentials for the currently authenticated user are returned.\n\n You cannot retrieve passwords by this or any other means.\n \"\"\"\n if not user:\n user = self.user\n user_id = utils.get_id(user)\n uri = \"users/%s/OS-KSADM/credentials\" % user_id\n resp, resp_body = self.method_get(uri)\n return resp_body.get(\"credentials\")"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn either a list of all tenants or the currently - authenticated user.", "response": "def _list_tenants(self, admin):\n \"\"\"\n Returns either a list of all tenants (admin=True), or the tenant for\n the currently-authenticated user (admin=False).\n \"\"\"\n resp, resp_body = self.method_get(\"tenants\", admin=admin)\n if 200 <= resp.status_code < 300:\n tenants = resp_body.get(\"tenants\", [])\n return [Tenant(self, tenant) for tenant in tenants]\n elif resp.status_code in (401, 403):\n raise exc.AuthorizationFailure(\"You are not authorized to list \"\n \"tenants.\")\n else:\n raise exc.TenantNotFound(\"Could not get a list of tenants.\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_tenant(self, name, description=None, enabled=True):\n data = {\"tenant\": {\n \"name\": name,\n \"enabled\": enabled,\n }}\n if description:\n data[\"tenant\"][\"description\"] = description\n resp, resp_body = self.method_post(\"tenants\", data=data)\n return Tenant(self, resp_body)", "response": "Creates a new tenant."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nupdates an existing tenant.", "response": "def update_tenant(self, tenant, name=None, description=None, enabled=True):\n \"\"\"\n ADMIN ONLY. 
Updates an existing tenant.\n \"\"\"\n tenant_id = utils.get_id(tenant)\n data = {\"tenant\": {\n \"enabled\": enabled,\n }}\n if name:\n data[\"tenant\"][\"name\"] = name\n if description:\n data[\"tenant\"][\"description\"] = description\n resp, resp_body = self.method_put(\"tenants/%s\" % tenant_id, data=data)\n return Tenant(self, resp_body)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ndelete the tenant from the system.", "response": "def delete_tenant(self, tenant):\n \"\"\"\n ADMIN ONLY. Removes the tenant from the system. There is no 'undo'\n available, so you should be certain that the tenant specified is the\n tenant you wish to delete.\n \"\"\"\n tenant_id = utils.get_id(tenant)\n uri = \"tenants/%s\" % tenant_id\n resp, resp_body = self.method_delete(uri)\n if resp.status_code == 404:\n raise exc.TenantNotFound(\"Tenant '%s' does not exist.\" % tenant)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef list_roles(self, service_id=None, limit=None, marker=None):\n uri = \"OS-KSADM/roles\"\n pagination_items = []\n if service_id is not None:\n pagination_items.append(\"serviceId=%s\" % service_id)\n if limit is not None:\n pagination_items.append(\"limit=%s\" % limit)\n if marker is not None:\n pagination_items.append(\"marker=%s\" % marker)\n pagination = \"&\".join(pagination_items)\n if pagination:\n uri = \"%s?%s\" % (uri, pagination)\n resp, resp_body = self.method_get(uri)\n roles = resp_body.get(\"roles\", [])\n return [Role(self, role) for role in roles]", "response": "Returns a list of all global roles for users optionally limited by service."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning a Role object representing the specified parameter.", "response": "def get_role(self, role):\n \"\"\"\n Returns a Role object representing the specified parameter. 
The 'role'\n parameter can be either an existing Role object, or the ID of the role.\n\n If an invalid role is passed, a NotFound exception is raised.\n \"\"\"\n uri = \"OS-KSADM/roles/%s\" % utils.get_id(role)\n resp, resp_body = self.method_get(uri)\n role = Role(self, resp_body.get(\"role\"))\n return role"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_role_to_user(self, role, user):\n uri = \"users/%s/roles/OS-KSADM/%s\" % (utils.get_id(user),\n utils.get_id(role))\n resp, resp_body = self.method_put(uri)", "response": "Adds the specified role to the specified user."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete_role_from_user(self, role, user):\n uri = \"users/%s/roles/OS-KSADM/%s\" % (utils.get_id(user),\n utils.get_id(role))\n resp, resp_body = self.method_delete(uri)", "response": "Deletes the specified role from the specified user."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nparse the API time string and return a proper datetime object.", "response": "def _parse_api_time(timestr):\n \"\"\"\n Typical expiration times returned from the auth server are in this\n format:\n 2012-05-02T14:27:40.000-05:00\n They can also be returned as a UTC value in this format:\n 2012-05-02T14:27:40.000Z\n This method returns a proper datetime object from either of these\n formats.\n \"\"\"\n try:\n reg_groups = API_DATE_PATTERN.match(timestr).groups()\n yr, mth, dy, hr, mn, sc, off_sign, off_hr, off_mn = reg_groups\n except AttributeError:\n # UTC dates don't show offsets.\n utc_groups = UTC_API_DATE_PATTERN.match(timestr).groups()\n yr, mth, dy, hr, mn, sc = utc_groups\n off_sign = \"+\"\n off_hr = off_mn = 0\n base_dt = datetime.datetime(int(yr), int(mth), int(dy), int(hr),\n int(mn), int(sc))\n delta = datetime.timedelta(hours=int(off_hr), minutes=int(off_mn))\n if off_sign == \"+\":\n # Time is greater than UTC\n ret = 
base_dt - delta\n else:\n ret = base_dt + delta\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef delete(self):\n try:\n return super(CloudNetwork, self).delete()\n except exc.Forbidden as e:\n # Network is in use\n raise exc.NetworkInUse(\"Cannot delete a network in use by a server.\")", "response": "A method to delete the cloud network from the database."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncreate the dict required to create a network.", "response": "def _create_body(self, name, label=None, cidr=None):\n \"\"\"\n Used to create the dict required to create a network. Accepts either\n 'label' or 'name' as the keyword parameter for the label attribute.\n \"\"\"\n label = label or name\n body = {\"network\": {\n \"label\": label,\n \"cidr\": cidr,\n }}\n return body"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _configure_manager(self):\n self._manager = CloudNetworkManager(self, resource_class=CloudNetwork,\n response_key=\"network\", uri_base=\"os-networksv2\")", "response": "Creates the Manager instance to handle networks."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates a new isolated network.", "response": "def create(self, label=None, name=None, cidr=None):\n \"\"\"\n Wraps the basic create() call to handle specific failures.\n \"\"\"\n try:\n return super(CloudNetworkClient, self).create(label=label,\n name=name, cidr=cidr)\n except exc.BadRequest as e:\n msg = e.message\n if \"too many networks\" in msg:\n raise exc.NetworkCountExceeded(\"Cannot create network; the \"\n \"maximum number of isolated networks already exist.\")\n elif \"does not contain enough\" in msg:\n raise exc.NetworkCIDRInvalid(\"Networks must contain two or \"\n \"more hosts; the CIDR '%s' is too restrictive.\" % cidr)\n elif \"CIDR is malformed\" in msg:\n 
raise exc.NetworkCIDRMalformed(\"The CIDR '%s' is not valid.\" % cidr)\n else:\n # Something unexpected\n raise"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete(self, network):\n try:\n return super(CloudNetworkClient, self).delete(network)\n except exc.Forbidden as e:\n # Network is in use\n raise exc.NetworkInUse(\"Cannot delete a network in use by a server.\")", "response": "A method to delete a network from the cloud."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_server_networks(self, network, public=False, private=False,\n key=None):\n \"\"\"\n Creates the dict of network UUIDs required by Cloud Servers when\n creating a new server with isolated networks. By default, the UUID\n values are returned with the key of \"net-id\", which is what novaclient\n expects. Other tools may require different values, such as 'uuid'. If\n that is the case, pass the desired key as the 'key' parameter.\n\n By default only this network is included. 
If you wish to create a\n server that has either the public (internet) or private (ServiceNet)\n networks, you have to pass those parameters in with values of True.\n \"\"\"\n return _get_server_networks(network, public=public, private=private,\n key=key)", "response": "Returns a dict of network UUIDs required by Cloud Servers when creating a new server with isolated networks."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _openTypeOS2WidthClassFormatter(value):\n value = int(round2(value))\n if value > 9:\n value = 9\n elif value < 1:\n value = 1\n return value", "response": "Format value into OpenTypeOS2WidthClass."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nextract info from the otherInfoObject.", "response": "def extractInfo(self, otherInfoObject):\n \"\"\"\n >>> from fontMath.test.test_mathInfo import _TestInfoObject, _testData\n >>> from fontMath.mathFunctions import _roundNumber\n >>> info1 = MathInfo(_TestInfoObject())\n >>> info2 = info1 * 2.5\n >>> info3 = _TestInfoObject()\n >>> info2.extractInfo(info3)\n >>> written = {}\n >>> expected = {}\n >>> for attr, value in _testData.items():\n ... if value is None:\n ... continue\n ... written[attr] = getattr(info2, attr)\n ... if isinstance(value, list):\n ... expectedValue = [_roundNumber(v * 2.5) for v in value]\n ... elif isinstance(value, int):\n ... expectedValue = _roundNumber(value * 2.5)\n ... else:\n ... expectedValue = value * 2.5\n ... 
expected[attr] = expectedValue\n >>> sorted(expected) == sorted(written)\n True\n \"\"\"\n for attr, (formatter, factorIndex) in _infoAttrs.items():\n if hasattr(self, attr):\n v = getattr(self, attr)\n if v is not None:\n if formatter is not None:\n v = formatter(v)\n setattr(otherInfoObject, attr, v)\n if hasattr(self, \"postscriptWeightName\"):\n otherInfoObject.postscriptWeightName = self.postscriptWeightName"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\npairs anchors in a single node.", "response": "def _pairAnchors(anchorDict1, anchorDict2):\n \"\"\"\n Anchors are paired using the following rules:\n\n\n Matching Identifiers\n --------------------\n >>> anchors1 = {\n ... \"test\" : [\n ... (None, 1, 2, None),\n ... (\"identifier 1\", 3, 4, None)\n ... ]\n ... }\n >>> anchors2 = {\n ... \"test\" : [\n ... (\"identifier 1\", 1, 2, None),\n ... (None, 3, 4, None)\n ... ]\n ... }\n >>> expected = [\n ... (\n ... dict(name=\"test\", identifier=None, x=1, y=2, color=None),\n ... dict(name=\"test\", identifier=None, x=3, y=4, color=None)\n ... ),\n ... (\n ... dict(name=\"test\", identifier=\"identifier 1\", x=3, y=4, color=None),\n ... dict(name=\"test\", identifier=\"identifier 1\", x=1, y=2, color=None)\n ... )\n ... ]\n >>> _pairAnchors(anchors1, anchors2) == expected\n True\n\n Mismatched Identifiers\n ----------------------\n >>> anchors1 = {\n ... \"test\" : [\n ... (\"identifier 1\", 3, 4, None)\n ... ]\n ... }\n >>> anchors2 = {\n ... \"test\" : [\n ... (\"identifier 2\", 1, 2, None),\n ... ]\n ... }\n >>> expected = [\n ... (\n ... dict(name=\"test\", identifier=\"identifier 1\", x=3, y=4, color=None),\n ... dict(name=\"test\", identifier=\"identifier 2\", x=1, y=2, color=None)\n ... )\n ... 
]\n >>> _pairAnchors(anchors1, anchors2) == expected\n True\n \"\"\"\n pairs = []\n for name, anchors1 in anchorDict1.items():\n if name not in anchorDict2:\n continue\n anchors2 = anchorDict2[name]\n # align with matching identifiers\n removeFromAnchors1 = []\n for anchor1 in anchors1:\n match = None\n identifier = anchor1[0]\n for anchor2 in anchors2:\n if anchor2[0] == identifier:\n match = anchor2\n break\n if match is not None:\n anchor2 = match\n anchors2.remove(anchor2)\n removeFromAnchors1.append(anchor1)\n a1 = dict(name=name, identifier=identifier)\n a1[\"x\"], a1[\"y\"], a1[\"color\"] = anchor1[1:]\n a2 = dict(name=name, identifier=identifier)\n a2[\"x\"], a2[\"y\"], a2[\"color\"] = anchor2[1:]\n pairs.append((a1, a2))\n for anchor1 in removeFromAnchors1:\n anchors1.remove(anchor1)\n if not anchors1 or not anchors2:\n continue\n # align by index\n while 1:\n anchor1 = anchors1.pop(0)\n anchor2 = anchors2.pop(0)\n a1 = dict(name=name)\n a1[\"identifier\"], a1[\"x\"], a1[\"y\"], a1[\"color\"] = anchor1\n a2 = dict(name=name, identifier=identifier)\n a2[\"identifier\"], a2[\"x\"], a2[\"y\"], a2[\"color\"] = anchor2\n pairs.append((a1, a2))\n if not anchors1:\n break\n if not anchors2:\n break\n return pairs"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns a copy of this MathGlyph without any MathSubObjects", "response": "def copyWithoutMathSubObjects(self):\n \"\"\"\n return a new MathGlyph containing all data except:\n contours\n components\n anchors\n guidelines\n\n this is used mainly for internal glyph math.\n \"\"\"\n n = MathGlyph(None)\n n.name = self.name\n if self.unicodes is not None:\n n.unicodes = list(self.unicodes)\n n.width = self.width\n n.height = self.height\n n.note = self.note\n n.lib = deepcopy(dict(self.lib))\n return n"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ndraw self using pointPen", "response": "def drawPoints(self, pointPen, 
filterRedundantPoints=False):\n \"\"\"draw self using pointPen\"\"\"\n if filterRedundantPoints:\n pointPen = FilterRedundantPointPen(pointPen)\n for contour in self.contours:\n pointPen.beginPath(identifier=contour[\"identifier\"])\n for segmentType, pt, smooth, name, identifier in contour[\"points\"]:\n pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier)\n pointPen.endPath()\n for component in self.components:\n pointPen.addComponent(component[\"baseGlyph\"], component[\"transformation\"], identifier=component[\"identifier\"])"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndraw self using pen", "response": "def draw(self, pen, filterRedundantPoints=False):\n \"\"\"draw self using pen\"\"\"\n from fontTools.pens.pointPen import PointToSegmentPen\n pointPen = PointToSegmentPen(pen)\n self.drawPoints(pointPen, filterRedundantPoints=filterRedundantPoints)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef extractGlyph(self, glyph, pointPen=None, onlyGeometry=False):\n if pointPen is None:\n pointPen = glyph.getPointPen()\n glyph.clearContours()\n glyph.clearComponents()\n glyph.clearAnchors()\n glyph.clearGuidelines()\n glyph.lib.clear()\n cleanerPen = FilterRedundantPointPen(pointPen)\n self.drawPoints(cleanerPen)\n glyph.anchors = [dict(anchor) for anchor in self.anchors]\n glyph.guidelines = [_compressGuideline(guideline) for guideline in self.guidelines]\n glyph.image = _compressImage(self.image)\n glyph.lib = deepcopy(dict(self.lib))\n glyph.width = self.width\n glyph.height = self.height\n glyph.note = self.note\n if not onlyGeometry:\n glyph.name = self.name\n glyph.unicodes = list(self.unicodes)\n return glyph", "response": "rehydrate to a glyph. this requires that a point pen other\n than the type of pen returned by glyph. 
getPointPen is required for drawing."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _flushContour(self):\n self.contours.append(\n dict(identifier=self._contourIdentifier, points=[])\n )\n contourPoints = self.contours[-1][\"points\"]\n points = self._points\n # move offcurves at the beginning of the contour to the end\n haveOnCurve = False\n for point in points:\n if point[0] is not None:\n haveOnCurve = True\n break\n if haveOnCurve:\n while 1:\n if points[0][0] is None:\n point = points.pop(0)\n points.append(point)\n else:\n break\n # convert lines to curves\n holdingOffCurves = []\n for index, point in enumerate(points):\n segmentType = point[0]\n if segmentType == \"line\":\n pt, smooth, name, identifier = point[1:]\n prevPt = points[index - 1][1]\n if index == 0:\n holdingOffCurves.append((None, prevPt, False, None, None))\n holdingOffCurves.append((None, pt, False, None, None))\n else:\n contourPoints.append((None, prevPt, False, None, None))\n contourPoints.append((None, pt, False, None, None))\n contourPoints.append((\"curve\", pt, smooth, name, identifier))\n else:\n contourPoints.append(point)\n contourPoints.extend(holdingOffCurves)", "response": "This function normalizes the contour so that the contour is not in the middle of the contour."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ntaking a 6 - tuple and return a ShallowTransform object.", "response": "def matrixToMathTransform(matrix):\n \"\"\" Take a 6-tuple and return a ShallowTransform object.\"\"\"\n if isinstance(matrix, ShallowTransform):\n return matrix\n off, scl, rot = MathTransform(matrix).decompose()\n return ShallowTransform(off, scl, rot)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ntaking a ShallowTransform object and return a 6 - tuple.", "response": "def mathTransformToMatrix(mathTransform):\n \"\"\" Take a ShallowTransform object and return a 6-tuple. 
\"\"\"\n m = MathTransform().compose(mathTransform.offset, mathTransform.scale, mathTransform.rotation)\n return tuple(m)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _linearInterpolationTransformMatrix(matrix1, matrix2, value):\n return tuple(_interpolateValue(matrix1[i], matrix2[i], value) for i in range(len(matrix1)))", "response": "Linear interpolation of the transform matrix."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _polarDecomposeInterpolationTransformation(matrix1, matrix2, value):\n m1 = MathTransform(matrix1)\n m2 = MathTransform(matrix2)\n return tuple(m1.interpolate(m2, value))", "response": "Interpolate using the PolarDecompose interpolation method."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _mathPolarDecomposeInterpolationTransformation(matrix1, matrix2, value):\n off, scl, rot = MathTransform(matrix1).decompose()\n m1 = ShallowTransform(off, scl, rot)\n off, scl, rot = MathTransform(matrix2).decompose()\n m2 = ShallowTransform(off, scl, rot)\n m3 = m1 + value * (m2-m1)\n m3 = MathTransform().compose(m3.offset, m3.scale, m3.rotation)\n return tuple(m3)", "response": "Interpolation with ShallowTransfor wrapped by decompose / compose actions."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\niterate over the asserted Facts.", "response": "def facts(self):\n \"\"\"Iterate over the asserted Facts.\"\"\"\n fact = lib.EnvGetNextFact(self._env, ffi.NULL)\n\n while fact != ffi.NULL:\n yield new_fact(self._env, fact)\n\n fact = lib.EnvGetNextFact(self._env, fact)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef templates(self):\n template = lib.EnvGetNextDeftemplate(self._env, ffi.NULL)\n\n while template != ffi.NULL:\n yield Template(self._env, template)\n\n template = 
lib.EnvGetNextDeftemplate(self._env, template)", "response": "Iterate over the defined Templates."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef find_template(self, name):\n deftemplate = lib.EnvFindDeftemplate(self._env, name.encode())\n if deftemplate == ffi.NULL:\n raise LookupError(\"Template '%s' not found\" % name)\n\n return Template(self._env, deftemplate)", "response": "Find the Template by its name."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nasserts a fact as string.", "response": "def assert_string(self, string):\n \"\"\"Assert a fact as string.\"\"\"\n fact = lib.EnvAssertString(self._env, string.encode())\n\n if fact == ffi.NULL:\n raise CLIPSError(self._env)\n\n return new_fact(self._env, fact)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef load_facts(self, facts):\n facts = facts.encode()\n\n if os.path.exists(facts):\n ret = lib.EnvLoadFacts(self._env, facts)\n if ret == -1:\n raise CLIPSError(self._env)\n else:\n ret = lib.EnvLoadFactsFromString(self._env, facts, -1)\n if ret == -1:\n raise CLIPSError(self._env)\n\n return ret", "response": "Load a set of facts into the CLIPS data base."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef save_facts(self, path, mode=SaveMode.LOCAL_SAVE):\n ret = lib.EnvSaveFacts(self._env, path.encode(), mode)\n if ret == -1:\n raise CLIPSError(self._env)\n\n return ret", "response": "Save the facts in the system to the specified file."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ntrues if the fact has been asserted within CLIPS.", "response": "def asserted(self):\n \"\"\"True if the fact has been asserted within CLIPS.\"\"\"\n # https://sourceforge.net/p/clipsrules/discussion/776945/thread/4f04bb9e/\n if self.index == 0:\n return 
False\n\n return bool(lib.EnvFactExistp(self._env, self._fact))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef assertit(self):\n if self.asserted:\n raise RuntimeError(\"Fact already asserted\")\n\n lib.EnvAssignFactSlotDefaults(self._env, self._fact)\n\n if lib.EnvAssert(self._env, self._fact) == ffi.NULL:\n raise CLIPSError(self._env)", "response": "Assert the fact within the CLIPS environment."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef retract(self):\n if lib.EnvRetract(self._env, self._fact) != 1:\n raise CLIPSError(self._env)", "response": "Retract the fact from the CLIPS environment."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef append(self, value):\n if self.asserted:\n raise RuntimeError(\"Fact already asserted\")\n\n self._multifield.append(value)", "response": "Append an element to the fact."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nappends multiple elements to the fact.", "response": "def extend(self, values):\n \"\"\"Append multiple elements to the fact.\"\"\"\n if self.asserted:\n raise RuntimeError(\"Fact already asserted\")\n\n self._multifield.extend(values)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef assertit(self):\n data = clips.data.DataObject(self._env)\n data.value = list(self._multifield)\n\n if lib.EnvPutFactSlot(\n self._env, self._fact, ffi.NULL, data.byref) != 1:\n raise CLIPSError(self._env)\n\n super(ImpliedFact, self).assertit()", "response": "Assert the fact within CLIPS."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nadding multiple elements to the fact.", "response": "def update(self, sequence=None, **mapping):\n \"\"\"Add multiple elements to the fact.\"\"\"\n if sequence is not 
None:\n if isinstance(sequence, dict):\n for slot in sequence:\n self[slot] = sequence[slot]\n else:\n for slot, value in sequence:\n self[slot] = value\n if mapping:\n for slot in sequence:\n self[slot] = sequence[slot]"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef watch(self, flag):\n lib.EnvSetDeftemplateWatch(self._env, int(flag), self._tpl)", "response": "Whether or not the Template is being watched."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\niterate over the Slots of the Template.", "response": "def slots(self):\n \"\"\"Iterate over the Slots of the Template.\"\"\"\n if self.implied:\n return ()\n\n data = clips.data.DataObject(self._env)\n\n lib.EnvDeftemplateSlotNames(self._env, self._tpl, data.byref)\n\n return tuple(\n TemplateSlot(self._env, self._tpl, n.encode()) for n in data.value)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncreate a new Fact from this template.", "response": "def new_fact(self):\n \"\"\"Create a new Fact from this template.\"\"\"\n fact = lib.EnvCreateFact(self._env, self._tpl)\n if fact == ffi.NULL:\n raise CLIPSError(self._env)\n\n return new_fact(self._env, fact)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef undefine(self):\n if lib.EnvUndeftemplate(self._env, self._tpl) != 1:\n raise CLIPSError(self._env)", "response": "Undefine the Template.\n\n Python equivalent of the CLIPS undeftemplate command."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ntrues if the slot is a multifield slot.", "response": "def multifield(self):\n \"\"\"True if the slot is a multifield slot.\"\"\"\n return bool(lib.EnvDeftemplateSlotMultiP(\n self._env, self._tpl, self._name))"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef range(self):\n data = 
clips.data.DataObject(self._env)\n\n lib.EnvDeftemplateSlotRange(\n self._env, self._tpl, self._name, data.byref)\n\n return tuple(data.value) if isinstance(data.value, list) else ()", "response": "A tuple containing the numeric range for this Slot."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef cardinality(self):\n data = clips.data.DataObject(self._env)\n\n lib.EnvDeftemplateSlotCardinality(\n self._env, self._tpl, self._name, data.byref)\n\n return tuple(data.value) if isinstance(data.value, list) else ()", "response": "A tuple containing the cardinality for this Slot."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef default_type(self):\n return TemplateSlotDefaultType(\n lib.EnvDeftemplateSlotDefaultP(self._env, self._tpl, self._name))", "response": "The default value type for this Slot."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef instances_changed(self):\n value = bool(lib.EnvGetInstancesChanged(self._env))\n lib.EnvSetInstancesChanged(self._env, int(False))\n\n return value", "response": "True if any instance has changed."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef classes(self):\n defclass = lib.EnvGetNextDefclass(self._env, ffi.NULL)\n\n while defclass != ffi.NULL:\n yield Class(self._env, defclass)\n\n defclass = lib.EnvGetNextDefclass(self._env, defclass)", "response": "Iterate over the defined Classes."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nfinds the Class by its name.", "response": "def find_class(self, name):\n \"\"\"Find the Class by its name.\"\"\"\n defclass = lib.EnvFindDefclass(self._env, name.encode())\n if defclass == ffi.NULL:\n raise LookupError(\"Class '%s' not found\" % name)\n\n return Class(self._env, defclass)"} {"SOURCE": "codesearchnet", 
"instruction": "Write a Python 3 function for\niterating over the defined Instancees.", "response": "def instances(self):\n \"\"\"Iterate over the defined Instancees.\"\"\"\n definstance = lib.EnvGetNextInstance(self._env, ffi.NULL)\n\n while definstance != ffi.NULL:\n yield Instance(self._env, definstance)\n\n definstance = lib.EnvGetNextInstance(self._env, definstance)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nfinding the Instance by its name.", "response": "def find_instance(self, name, module=None):\n \"\"\"Find the Instance by its name.\"\"\"\n module = module if module is not None else ffi.NULL\n definstance = lib.EnvFindInstance(self._env, module, name.encode(), 1)\n if definstance == ffi.NULL:\n raise LookupError(\"Instance '%s' not found\" % name)\n\n return Instance(self._env, definstance)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef load_instances(self, instances):\n instances = instances.encode()\n\n if os.path.exists(instances):\n try:\n return self._load_instances_binary(instances)\n except CLIPSError:\n return self._load_instances_text(instances)\n else:\n return self._load_instances_string(instances)", "response": "Load a set of instances into the CLIPS data base."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nrestoring a set of instances into the CLIPS data base.", "response": "def restore_instances(self, instances):\n \"\"\"Restore a set of instances into the CLIPS data base.\n\n The Python equivalent of the CLIPS restore-instances command.\n\n Instances can be passed as a set of strings or as a file.\n\n \"\"\"\n instances = instances.encode()\n\n if os.path.exists(instances):\n ret = lib.EnvRestoreInstances(self._env, instances)\n if ret == -1:\n raise CLIPSError(self._env)\n else:\n ret = lib.EnvRestoreInstancesFromString(self._env, instances, -1)\n if ret == -1:\n raise CLIPSError(self._env)\n\n return 
ret"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nsaving the instances in the system to the specified file.", "response": "def save_instances(self, path, binary=False, mode=SaveMode.LOCAL_SAVE):\n \"\"\"Save the instances in the system to the specified file.\n\n If binary is True, the instances will be saved in binary format.\n\n The Python equivalent of the CLIPS save-instances command.\n\n \"\"\"\n if binary:\n ret = lib.EnvBinarySaveInstances(self._env, path.encode(), mode)\n else:\n ret = lib.EnvSaveInstances(self._env, path.encode(), mode)\n if ret == 0:\n raise CLIPSError(self._env)\n\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef make_instance(self, command):\n ist = lib.EnvMakeInstance(self._env, command.encode())\n if ist == ffi.NULL:\n raise CLIPSError(self._env)\n\n return Instance(self._env, ist)", "response": "Create and initialize an instance of a user - defined class."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef watch_instances(self, flag):\n lib.EnvSetDefclassWatchInstances(self._env, int(flag), self._cls)", "response": "Whether or not the Class Instances are being watched."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates a new raw instance from this Class.", "response": "def new_instance(self, name):\n \"\"\"Create a new raw instance from this Class.\n\n No slot overrides or class default initializations\n are performed for the instance.\n\n This function bypasses message-passing.\n\n \"\"\"\n ist = lib.EnvCreateRawInstance(self._env, self._cls, name.encode())\n if ist == ffi.NULL:\n raise CLIPSError(self._env)\n\n return Instance(self._env, ist)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef find_message_handler(self, handler_name, handler_type='primary'):\n ret = 
lib.EnvFindDefmessageHandler(\n self._env, self._cls, handler_name.encode(), handler_type.encode())\n if ret == 0:\n raise CLIPSError(self._env)\n\n return MessageHandler(self._env, self._cls, ret)", "response": "Returns the MessageHandler given its name and type for this class."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef subclass(self, klass):\n return bool(lib.EnvSubclassP(self._env, self._cls, klass._cls))", "response": "True if the Class is a subclass of the given one."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef superclass(self, klass):\n return bool(lib.EnvSuperclassP(self._env, self._cls, klass._cls))", "response": "True if the Class is a superclass of the given one."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef slots(self, inherited=False):\n data = clips.data.DataObject(self._env)\n\n lib.EnvClassSlots(self._env, self._cls, data.byref, int(inherited))\n\n return (ClassSlot(self._env, self._cls, n.encode()) for n in data.value)", "response": "Iterate over the Slots of the class."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\niterating over the instances of the class.", "response": "def instances(self):\n \"\"\"Iterate over the instances of the class.\"\"\"\n ist = lib.EnvGetNextInstanceInClass(self._env, self._cls, ffi.NULL)\n\n while ist != ffi.NULL:\n yield Instance(self._env, ist)\n\n ist = lib.EnvGetNextInstanceInClass(self._env, self._cls, ist)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\niterating over the subclasses of the class.", "response": "def subclasses(self, inherited=False):\n \"\"\"Iterate over the subclasses of the class.\n\n This function is the Python equivalent\n of the CLIPS class-subclasses command.\n\n \"\"\"\n data = clips.data.DataObject(self._env)\n\n lib.EnvClassSubclasses(self._env, 
self._cls, data.byref, int(inherited))\n\n for klass in classes(self._env, data.value):\n yield klass"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef superclasses(self, inherited=False):\n data = clips.data.DataObject(self._env)\n\n lib.EnvClassSuperclasses(\n self._env, self._cls, data.byref, int(inherited))\n\n for klass in classes(self._env, data.value):\n yield klass", "response": "Iterate over the superclasses of the class."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\niterate over the message handlers of the class.", "response": "def message_handlers(self):\n \"\"\"Iterate over the message handlers of the class.\"\"\"\n index = lib.EnvGetNextDefmessageHandler(self._env, self._cls, 0)\n\n while index != 0:\n yield MessageHandler(self._env, self._cls, index)\n\n index = lib.EnvGetNextDefmessageHandler(self._env, self._cls, index)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ntrue if the Slot is public.", "response": "def public(self):\n \"\"\"True if the Slot is public.\"\"\"\n return bool(lib.EnvSlotPublicP(self._env, self._cls, self._name))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef initializable(self):\n return bool(lib.EnvSlotInitableP(self._env, self._cls, self._name))", "response": "True if the Slot is initializable."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef writable(self):\n return bool(lib.EnvSlotWritableP(self._env, self._cls, self._name))", "response": "True if the Slot is writable."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef accessible(self):\n return bool(lib.EnvSlotDirectAccessP(self._env, self._cls, self._name))", "response": "True if the Slot is directly accessible."} {"SOURCE": "codesearchnet", 
"instruction": "Given the following Python 3 function, write the documentation\ndef types(self):\n data = clips.data.DataObject(self._env)\n\n lib.EnvSlotTypes(self._env, self._cls, self._name, data.byref)\n\n return tuple(data.value) if isinstance(data.value, list) else ()", "response": "A tuple containing the value types for this Slot."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef range(self):\n data = clips.data.DataObject(self._env)\n\n lib.EnvSlotRange(self._env, self._cls, self._name, data.byref)\n\n return tuple(data.value) if isinstance(data.value, list) else ()", "response": "A tuple containing the numeric range for this Slot."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\niterates over the allowed classes for this slot.", "response": "def allowed_classes(self):\n \"\"\"Iterate over the allowed classes for this slot.\n\n The Python equivalent of the CLIPS slot-allowed-classes function.\n\n \"\"\"\n data = clips.data.DataObject(self._env)\n\n lib.EnvSlotAllowedClasses(\n self._env, self._cls, self._name, data.byref)\n\n if isinstance(data.value, list):\n for klass in classes(self._env, data.value):\n yield klass"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nsend a message to the Instance.", "response": "def send(self, message, arguments=None):\n \"\"\"Send a message to the Instance.\n\n Message arguments must be provided as a string.\n\n \"\"\"\n output = clips.data.DataObject(self._env)\n instance = clips.data.DataObject(\n self._env, dtype=CLIPSType.INSTANCE_ADDRESS)\n instance.value = self._ist\n\n args = arguments.encode() if arguments is not None else ffi.NULL\n\n lib.EnvSend(\n self._env, instance.byref, message.encode(), args, output.byref)\n\n return output.value"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nunmakes the current instance.", "response": "def 
unmake(self):\n \"\"\"This method is equivalent to delete except that it uses\n message-passing instead of directly deleting the instance.\n\n \"\"\"\n if lib.EnvUnmakeInstance(self._env, self._ist) != 1:\n raise CLIPSError(self._env)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ntrues if the MessageHandler is being watched.", "response": "def watch(self):\n \"\"\"True if the MessageHandler is being watched.\"\"\"\n return bool(lib.EnvGetDefmessageHandlerWatch(\n self._env, self._cls, self._idx))"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ntruing if the MessageHandler is being watched.", "response": "def watch(self, flag):\n \"\"\"True if the MessageHandler is being watched.\"\"\"\n lib.EnvSetDefmessageHandlerWatch(\n self._env, int(flag), self._cls, self._idx)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef deletable(self):\n return bool(lib.EnvIsDefmessageHandlerDeletable(\n self._env, self._cls, self._idx))", "response": "True if the MessageHandler can be deleted."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef load(self, path):\n try:\n self._load_binary(path)\n except CLIPSError:\n self._load_text(path)", "response": "Load a set of constructs into the CLIPS data base."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nsaves a set of constructs into the CLIPS data base.", "response": "def save(self, path, binary=False):\n \"\"\"Save a set of constructs into the CLIPS data base.\n\n If binary is True, the constructs will be saved in binary format.\n\n The Python equivalent of the CLIPS load command.\n\n \"\"\"\n if binary:\n ret = lib.EnvBsave(self._env, path.encode())\n else:\n ret = lib.EnvSave(self._env, path.encode())\n if ret == 0:\n raise CLIPSError(self._env)"} {"SOURCE": "codesearchnet", "instruction": 
"Make a summary of the following Python 3 code\ndef batch_star(self, path):\n if lib.EnvBatchStar(self._env, path.encode()) != 1:\n raise CLIPSError(self._env)", "response": "Evaluate the commands contained in the specific path."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef build(self, construct):\n if lib.EnvBuild(self._env, construct.encode()) != 1:\n raise CLIPSError(self._env)", "response": "Build a single construct in CLIPS."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef eval(self, construct):\n data = clips.data.DataObject(self._env)\n\n if lib.EnvEval(self._env, construct.encode(), data.byref) != 1:\n raise CLIPSError(self._env)\n\n return data.value", "response": "Evaluate an expression returning its value."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef define_function(self, function, name=None):\n name = name if name is not None else function.__name__\n\n ENVIRONMENT_DATA[self._env].user_functions[name] = function\n\n self.build(DEFFUNCTION.format(name))", "response": "Define a Python function within the CLIPS environment."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef add_to_environment(self, environment):\n self._env = environment._env\n self._userdata = ffi.new_handle(self)\n\n ENVIRONMENT_DATA[self._env].routers[self.name] = self\n\n lib.EnvAddRouterWithContext(\n self._env, self._name.encode(), self._priority,\n lib.query_function,\n lib.print_function,\n lib.getc_function,\n lib.ungetc_function,\n lib.exit_function,\n self._userdata)", "response": "Add the router to the given environment."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn the DATA_OBJECT stored value.", "response": "def value(self):\n \"\"\"Return the DATA_OBJECT stored value.\"\"\"\n dtype = 
lib.get_data_type(self._data)\n dvalue = lib.get_data_value(self._data)\n\n if dvalue == ffi.NULL:\n return None\n\n return self.python_value(dtype, dvalue)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef value(self, value):\n dtype = TYPES[type(value)] if self._type is None else self._type\n\n lib.set_data_type(self._data, dtype)\n lib.set_data_value(self._data, self.clips_value(value))", "response": "Sets the DATA_OBJECT stored value."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef python_value(self, dtype, dvalue):\n try:\n return CONVERTERS[dtype](dvalue)\n except KeyError:\n if dtype == clips.common.CLIPSType.MULTIFIELD:\n return self.multifield_to_list()\n if dtype == clips.common.CLIPSType.FACT_ADDRESS:\n return clips.facts.new_fact(self._env, lib.to_pointer(dvalue))\n if dtype == clips.common.CLIPSType.INSTANCE_ADDRESS:\n return clips.classes.Instance(self._env, lib.to_pointer(dvalue))\n\n return None", "response": "Convert a CLIPS type into a Python object."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nconvert a Python type into CLIPS.", "response": "def clips_value(self, dvalue):\n \"\"\"Convert a Python type into CLIPS.\"\"\"\n try:\n return VALUES[type(dvalue)](self._env, dvalue)\n except KeyError:\n if isinstance(dvalue, (list, tuple)):\n return self.list_to_multifield(dvalue)\n if isinstance(dvalue, (clips.facts.Fact)):\n return dvalue._fact\n if isinstance(dvalue, (clips.classes.Instance)):\n return dvalue._ist\n\n return ffi.NULL"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ntrue if any rule activation changes have occurred.", "response": "def agenda_changed(self):\n \"\"\"True if any rule activation changes have occurred.\"\"\"\n value = bool(lib.EnvGetAgendaChanged(self._env))\n lib.EnvSetAgendaChanged(self._env, int(False))\n\n return value"} {"SOURCE": "codesearchnet", 
"instruction": "Can you implement a function in Python 3 that\niterates over the defined Rules.", "response": "def rules(self):\n \"\"\"Iterate over the defined Rules.\"\"\"\n rule = lib.EnvGetNextDefrule(self._env, ffi.NULL)\n\n while rule != ffi.NULL:\n yield Rule(self._env, rule)\n\n rule = lib.EnvGetNextDefrule(self._env, rule)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef find_rule(self, rule):\n defrule = lib.EnvFindDefrule(self._env, rule.encode())\n if defrule == ffi.NULL:\n raise LookupError(\"Rule '%s' not found\" % defrule)\n\n return Rule(self._env, defrule)", "response": "Find a Rule by name."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef reorder(self, module=None):\n module = module._mdl if module is not None else ffi.NULL\n\n lib.EnvReorderAgenda(self._env, module)", "response": "Reorder Activations in the Agenda."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\niterating over the Activations in the Agenda.", "response": "def activations(self):\n \"\"\"Iterate over the Activations in the Agenda.\"\"\"\n activation = lib.EnvGetNextActivation(self._env, ffi.NULL)\n\n while activation != ffi.NULL:\n yield Activation(self._env, activation)\n\n activation = lib.EnvGetNextActivation(self._env, activation)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ndeleting all activations in the agenda.", "response": "def clear(self):\n \"\"\"Deletes all activations in the agenda.\"\"\"\n if lib.EnvDeleteActivation(self._env, ffi.NULL) != 1:\n raise CLIPSError(self._env)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef watch_firings(self, flag):\n lib.EnvSetDefruleWatchFirings(self._env, int(flag), self._rule)", "response": "Whether or not the Rule firings are being watched."} {"SOURCE": "codesearchnet", "instruction": "Can you generate 
a brief explanation for the following Python 3 code\ndef watch_activations(self, flag):\n lib.EnvSetDefruleWatchActivations(self._env, int(flag), self._rule)", "response": "Whether or not the Rule Activations are being watched."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef matches(self, verbosity=Verbosity.TERSE):\n data = clips.data.DataObject(self._env)\n\n lib.EnvMatches(self._env, self._rule, verbosity, data.byref)\n\n return tuple(data.value)", "response": "Shows partial matches and activations."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef refresh(self):\n if lib.EnvRefresh(self._env, self._rule) != 1:\n raise CLIPSError(self._env)", "response": "Refresh the Rule.\n\n The Python equivalent of the CLIPS refresh command."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting the Activation salience value.", "response": "def salience(self, salience):\n \"\"\"Activation salience value.\"\"\"\n lib.EnvSetActivationSalience(self._env, self._act, salience)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef delete(self):\n if lib.EnvDeleteActivation(self._env, self._act) != 1:\n raise CLIPSError(self._env)\n\n self._env = None", "response": "Remove the activation from the agenda."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef globals_changed(self):\n value = bool(lib.EnvGetGlobalsChanged(self._env))\n lib.EnvSetGlobalsChanged(self._env, int(False))\n\n return value", "response": "True if any Global has changed."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef globals(self):\n defglobal = lib.EnvGetNextDefglobal(self._env, ffi.NULL)\n\n while defglobal != ffi.NULL:\n yield Global(self._env, defglobal)\n\n defglobal = 
lib.EnvGetNextDefglobal(self._env, defglobal)", "response": "Iterates over the defined Globals."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nfinds the Global by its name.", "response": "def find_global(self, name):\n \"\"\"Find the Global by its name.\"\"\"\n defglobal = lib.EnvFindDefglobal(self._env, name.encode())\n if defglobal == ffi.NULL:\n raise LookupError(\"Global '%s' not found\" % name)\n\n return Global(self._env, defglobal)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\niterate over the defined Modules.", "response": "def modules(self):\n \"\"\"Iterates over the defined Modules.\"\"\"\n defmodule = lib.EnvGetNextDefmodule(self._env, ffi.NULL)\n\n while defmodule != ffi.NULL:\n yield Module(self._env, defmodule)\n\n defmodule = lib.EnvGetNextDefmodule(self._env, defmodule)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nfind the Module by its name.", "response": "def find_module(self, name):\n \"\"\"Find the Module by its name.\"\"\"\n defmodule = lib.EnvFindDefmodule(self._env, name.encode())\n if defmodule == ffi.NULL:\n raise LookupError(\"Module '%s' not found\" % name)\n\n return Module(self._env, defmodule)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef module(self):\n modname = ffi.string(lib.EnvDefglobalModule(self._env, self._glb))\n defmodule = lib.EnvFindDefmodule(self._env, modname)\n\n return Module(self._env, defmodule)", "response": "The module in which the Global is defined."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef watch(self, flag):\n lib.EnvSetDefglobalWatch(self._env, int(flag), self._glb)", "response": "Whether or not the Global is being watched."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef undefine(self):\n if 
lib.EnvUndefglobal(self._env, self._glb) != 1:\n raise CLIPSError(self._env)\n\n self._env = None", "response": "Undefine the Global.\n\n Python equivalent of the CLIPS undefglobal command."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\niterate over the defined Globals.", "response": "def functions(self):\n \"\"\"Iterates over the defined Globals.\"\"\"\n deffunction = lib.EnvGetNextDeffunction(self._env, ffi.NULL)\n\n while deffunction != ffi.NULL:\n yield Function(self._env, deffunction)\n\n deffunction = lib.EnvGetNextDeffunction(self._env, deffunction)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef find_function(self, name):\n deffunction = lib.EnvFindDeffunction(self._env, name.encode())\n if deffunction == ffi.NULL:\n raise LookupError(\"Function '%s' not found\" % name)\n\n return Function(self._env, deffunction)", "response": "Find the Function by its name."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\niterate over the defined Generics.", "response": "def generics(self):\n \"\"\"Iterates over the defined Generics.\"\"\"\n defgeneric = lib.EnvGetNextDefgeneric(self._env, ffi.NULL)\n\n while defgeneric != ffi.NULL:\n yield Generic(self._env, defgeneric)\n\n defgeneric = lib.EnvGetNextDefgeneric(self._env, defgeneric)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nfinds the Generic by its name.", "response": "def find_generic(self, name):\n \"\"\"Find the Generic by its name.\"\"\"\n defgeneric = lib.EnvFindDefgeneric(self._env, name.encode())\n if defgeneric == ffi.NULL:\n raise LookupError(\"Generic '%s' not found\" % name)\n\n return Generic(self._env, defgeneric)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef watch(self, flag):\n lib.EnvSetDeffunctionWatch(self._env, int(flag), self._fnc)", "response": 
"Whether or not the Function is being watched."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef undefine(self):\n if lib.EnvUndefgeneric(self._env, self._gnc) != 1:\n raise CLIPSError(self._env)\n\n self._env = None", "response": "Undefine the Generic.\n Python equivalent of the CLIPS undefgeneric command."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning count of values deviating of the mean i. e. larger than multiplier * std.", "response": "def _get_deviation_of_mean(self, series, multiplier=3):\n \"\"\"\n Returns count of values deviating of the mean, i.e. larger than `multiplier` * `std`.\n :type series:\n :param multiplier:\n :return:\n \"\"\"\n capped_series = np.minimum(\n series, series.mean() + multiplier * series.std())\n count = pd.value_counts(series != capped_series)\n count = count[True] if True in count else 0\n perc = self._percent(count / self.length)\n return count, perc"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _get_median_absolute_deviation(self, series, multiplier=3):\n capped_series = np.minimum(\n series, series.median() + multiplier * series.mad())\n count = pd.value_counts(series != capped_series)\n count = count[True] if True in count else 0\n perc = self._percent(count / self.length)\n return count, perc", "response": "Returns the count of values larger than multiplier * mad"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_columns(self, df, usage, columns=None):\n columns_excluded = pd.Index([])\n columns_included = df.columns\n\n if usage == self.INCLUDE:\n try:\n columns_included = columns_included.intersection(pd.Index(columns))\n except TypeError:\n pass\n elif usage == self.EXCLUDE:\n try:\n columns_excluded = columns_excluded.union(pd.Index(columns))\n except TypeError:\n pass\n\n columns_included = 
columns_included.difference(columns_excluded)\n return columns_included.intersection(df.columns)", "response": "Returns a data_frame. columns."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfinds clusters in an array using a Gaussian Mixture Model. Before clustering, `data` can be automatically rescaled as specified by the `scale` argument. Parameters ---------- data : FCSData or array_like Data to cluster. n_clusters : int Number of clusters to find. tol : float, optional Tolerance for convergence. Directly passed to either ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s version. min_covar : float, optional The minimum trace that the initial covariance matrix will have. If ``scikit-learn``'s version is older than 0.18, `min_covar` is also passed directly to ``GMM``. scale : str, optional Rescaling applied to `data` before performing clustering. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- labels : array Nx1 array with labels for each element in `data`, assigning ``data[i]`` to cluster ``labels[i]``. Notes ----- A Gaussian Mixture Model finds clusters by fitting a linear combination of `n_clusters` Gaussian probability density functions (pdf) to `data` using Expectation Maximization (EM). This method can be fairly sensitive to the initial parameter choice. To generate a reasonable set of initial conditions, `clustering_gmm` first divides all points in `data` into `n_clusters` groups of the same size based on their Euclidean distance to the minimum value. Then, for each group, the 50% samples farther away from the mean are discarded. The mean and covariance are calculated from the remaining samples of each group, and used as initial conditions for the GMM EM algorithm. `clustering_gmm` internally uses a `GaussianMixture` object from the ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is lower than 0.18), with full covariance matrices for each cluster. 
For more information, consult ``scikit-learn``'s documentation.", "response": "def clustering_gmm(data,\n n_clusters,\n tol=1e-7,\n min_covar=None,\n scale='logicle'):\n \"\"\"\n Find clusters in an array using a Gaussian Mixture Model.\n\n Before clustering, `data` can be automatically rescaled as specified by\n the `scale` argument.\n\n Parameters\n ----------\n data : FCSData or array_like\n Data to cluster.\n n_clusters : int\n Number of clusters to find.\n tol : float, optional\n Tolerance for convergence. Directly passed to either\n ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s\n version.\n min_covar : float, optional\n The minimum trace that the initial covariance matrix will have. If\n ``scikit-learn``'s version is older than 0.18, `min_covar` is also\n passed directly to ``GMM``.\n scale : str, optional\n Rescaling applied to `data` before performing clustering. Can be\n either ``linear`` (no rescaling), ``log``, or ``logicle``.\n\n Returns\n -------\n labels : array\n Nx1 array with labels for each element in `data`, assigning\n ``data[i]`` to cluster ``labels[i]``.\n\n Notes\n -----\n A Gaussian Mixture Model finds clusters by fitting a linear combination\n of `n_clusters` Gaussian probability density functions (pdf) to `data`\n using Expectation Maximization (EM).\n\n This method can be fairly sensitive to the initial parameter choice. To\n generate a reasonable set of initial conditions, `clustering_gmm`\n first divides all points in `data` into `n_clusters` groups of the\n same size based on their Euclidean distance to the minimum value. Then,\n for each group, the 50% samples farther away from the mean are\n discarded. 
The mean and covariance are calculated from the remaining\n samples of each group, and used as initial conditions for the GMM EM\n algorithm.\n\n `clustering_gmm` internally uses a `GaussianMixture` object from the\n ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is\n lower than 0.18), with full covariance matrices for each cluster. For\n more information, consult ``scikit-learn``'s documentation.\n\n \"\"\"\n\n # Initialize min_covar parameter\n # Parameter is initialized differently depending on scikit's version\n if min_covar is None:\n if packaging.version.parse(sklearn.__version__) \\\n >= packaging.version.parse('0.18'):\n min_covar = 1e-3\n else:\n min_covar = 5e-5\n\n # Copy events before rescaling\n data = data.copy()\n\n # Apply rescaling\n if scale=='linear':\n # No rescaling\n pass\n elif scale=='log':\n # Logarithm of zero and negatives is undefined. Therefore, saturate\n # any non-positives to a small positive value.\n # The machine epsilon `eps` is the smallest number such that\n # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.\n data[data < 1e-15] = 1e-15\n # Rescale\n data = np.log10(data)\n elif scale=='logicle':\n # Use the logicle transform class in the plot module, and transform\n # data one channel at a time.\n for ch in range(data.shape[1]):\n # We need a transformation from \"data value\" to \"display scale\"\n # units. To do so, we use an inverse logicle transformation.\n t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted()\n data[:,ch] = t.transform_non_affine(data[:,ch],\n mask_out_of_range=False)\n else:\n raise ValueError(\"scale {} not supported\".format(scale))\n\n ###\n # Parameter initialization\n ###\n weights = np.tile(1.0 / n_clusters, n_clusters)\n means = []\n covars = []\n\n # Calculate distance to minimum value. 
Then, sort based on this distance.\n dist = np.sum((data - np.min(data, axis=0))**2., axis=1)\n sorted_idx = np.argsort(dist)\n\n # Expected number of elements per cluster\n n_per_cluster = data.shape[0]/float(n_clusters)\n\n # Get means and covariances per cluster\n # We will just use a fraction of ``1 - discard_frac`` of the data.\n # Data at the edges that actually corresponds to another cluster can\n # really mess up the final result.\n discard_frac = 0.5\n for i in range(n_clusters):\n il = int((i + discard_frac/2)*n_per_cluster)\n ih = int((i + 1 - discard_frac/2)*n_per_cluster)\n sorted_idx_cluster = sorted_idx[il:ih]\n data_cluster = data[sorted_idx_cluster]\n # Calculate means and covariances\n means.append(np.mean(data_cluster, axis=0))\n if data.shape[1] == 1:\n cov = np.cov(data_cluster.T).reshape(1,1)\n else:\n cov = np.cov(data_cluster.T)\n # Add small number to diagonal to avoid near-singular covariances\n cov += np.eye(data.shape[1]) * min_covar\n covars.append(cov)\n # Means should be an array\n means = np.array(means)\n\n ###\n # Run Gaussian Mixture Model Clustering\n ###\n\n if packaging.version.parse(sklearn.__version__) \\\n >= packaging.version.parse('0.18'):\n\n # GaussianMixture uses precisions, the inverse of covariances.\n # To get the inverse, we solve the linear equation C*P = I. 
We also\n # use the fact that C is positive definite.\n precisions = [scipy.linalg.solve(c,\n np.eye(c.shape[0]),\n assume_a='pos')\n for c in covars]\n precisions = np.array(precisions)\n\n # Initialize GaussianMixture object\n gmm = GaussianMixture(n_components=n_clusters,\n tol=tol,\n covariance_type='full',\n weights_init=weights,\n means_init=means,\n precisions_init=precisions,\n max_iter=500)\n\n else:\n # Initialize GMM object\n gmm = GMM(n_components=n_clusters,\n tol=tol,\n min_covar=min_covar,\n covariance_type='full',\n params='mc',\n init_params='')\n\n # Set initial parameters\n gmm.weight_ = weights\n gmm.means_ = means\n gmm.covars_ = covars\n\n # Fit \n gmm.fit(data)\n\n # Get labels by sampling from the responsibilities\n # This avoids the complete elimination of a cluster if two or more \n # clusters have very similar means.\n resp = gmm.predict_proba(data)\n labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp]\n\n return labels"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nselect populations that are in the specified range and have a standard deviation of the most of their elements between two values.", "response": "def selection_std(populations,\n low=None,\n high=None,\n n_std_low=2.5,\n n_std_high=2.5,\n scale='logicle'):\n \"\"\"\n Select populations if most of their elements are between two values.\n\n This function selects populations from `populations` if their means are\n more than `n_std_low` standard deviations greater than `low` and\n `n_std_high` standard deviations lower than `high`.\n\n Optionally, all elements in `populations` can be rescaled as specified\n by the `scale` argument before calculating means and standard\n deviations.\n\n Parameters\n ----------\n populations : list of 1D arrays or 1-channel FCSData objects\n Populations to select or discard.\n low, high : int or float\n Low and high thresholds. Required if the elements in `populations`\n are numpy arrays. 
If not specified, and the elements in\n `populations` are FCSData objects, use 1.5% and 98.5% of the range\n in ``populations[0].range``.\n n_std_low, n_std_high : float, optional\n Number of standard deviations from `low` and `high`, respectively,\n that a population's mean has to be closer than to be discarded.\n scale : str, optional\n Rescaling applied to `populations` before calculating means and\n standard deviations. Can be either ``linear`` (no rescaling),\n ``log``, or ``logicle``.\n\n Returns\n -------\n selected_mask : boolean array\n Flags indicating whether a population has been selected.\n\n \"\"\"\n\n # Generate scaling functions\n if scale == 'linear':\n # Identity function\n sf = lambda x: x\n elif scale == 'log':\n sf = np.log10\n elif scale == 'logicle':\n # We need a transformation from \"data value\" to \"display scale\"\n # units. To do so, we use an inverse logicle transformation.\n t = FlowCal.plot._LogicleTransform(data=populations[0],\n channel=0).inverted()\n sf = lambda x: t.transform_non_affine(x, mask_out_of_range=False)\n else:\n raise ValueError(\"scale {} not supported\".format(scale))\n\n # If thresholds were provided, apply scaling function. Else, obtain and\n # rescale thresholds from range.\n if low is None:\n if hasattr(populations[0], 'hist_bins'):\n # Obtain default thresholds from range\n r = populations[0].range(channels=0)\n # If using log scale and the lower limit is non-positive, change to\n # a very small positive number.\n # The machine epsilon `eps` is the smallest number such that\n # `1.0 + eps != eps`. 
For a 64-bit floating point, `eps ~= 1e-15`.\n if scale == 'log' and r[0] <= 0:\n r[0] = 1e-15\n low = sf(r[0]) + 0.015*(sf(r[1]) - sf(r[0]))\n else:\n raise TypeError(\"argument 'low' not specified\")\n else:\n low = sf(low)\n if high is None:\n if hasattr(populations[0], 'hist_bins'):\n # Obtain default thresholds from range\n r = populations[0].range(channels=0)\n # If using log scale and the lower limit is non-positive, change to\n # a very small positive number.\n # The machine epsilon `eps` is the smallest number such that\n # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.\n if scale == 'log' and r[0] <= 0:\n r[0] = 1e-15\n high = sf(r[0]) + 0.985*(sf(r[1]) - sf(r[0]))\n else:\n raise TypeError(\"argument 'high' not specified\")\n else:\n high = sf(high)\n\n # Copy events\n for i in range(len(populations)):\n populations[i] = populations[i].copy()\n\n # For log scaling, logarithm of zero and negatives is undefined. Therefore,\n # saturate any non-positives to a small positive value.\n # The machine epsilon `eps` is the smallest number such that\n # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.\n if scale == 'log':\n for p in populations:\n p[p < 1e-15] = 1e-15\n\n # Rescale events\n for i in range(len(populations)):\n populations[i] = sf(populations[i])\n\n # Calculate means and standard deviations\n pop_mean = np.array([FlowCal.stats.mean(p) for p in populations])\n pop_std = np.array([FlowCal.stats.std(p) for p in populations])\n\n # Some populations, especially the highest ones when they are near\n # saturation, tend to aggregate mostly on one bin and give a standard\n # deviation of almost zero. This is an effect of the finite bin resolution\n # and probably gives a bad estimate of the standard deviation. We choose\n # to be conservative and overestimate the standard deviation in these\n # cases. 
Therefore, we set the minimum standard deviation to 0.005.\n min_std = 0.005\n pop_std[pop_std < min_std] = min_std\n\n # Return populations that don't cross either threshold\n selected_mask = np.logical_and(\n (pop_mean - n_std_low*pop_std) > low,\n (pop_mean + n_std_high*pop_std) < high)\n return selected_mask"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nfits a standard curve using a beads model with autofluorescence.", "response": "def fit_beads_autofluorescence(fl_rfi, fl_mef):\n \"\"\"\n Fit a standard curve using a beads model with autofluorescence.\n\n Parameters\n ----------\n fl_rfi : array\n Fluorescence values of bead populations in units of Relative\n Fluorescence Intensity (RFI).\n fl_mef : array\n Fluorescence values of bead populations in MEF units.\n\n Returns\n -------\n std_crv : function\n Standard curve that transforms fluorescence values from RFI to MEF\n units. This function has the signature ``y = std_crv(x)``, where\n `x` is some fluorescence value in RFI and `y` is the same\n fluorescence expressed in MEF units.\n beads_model : function\n Fluorescence model of calibration beads. This function has the\n signature ``y = beads_model(x)``, where `x` is the fluorescence of\n some bead population in RFI units and `y` is the same fluorescence\n expressed in MEF units, without autofluorescence.\n beads_params : array\n Fitted parameters of the bead fluorescence model: ``[m, b,\n fl_mef_auto]``.\n beads_model_str : str\n String representation of the beads model used.\n beads_params_names : list of str\n Names of the parameters in a list, in the same order as they are\n given in `beads_params`.\n\n Notes\n -----\n The following model is used to describe bead fluorescence::\n\n m*log(fl_rfi[i]) + b = log(fl_mef_auto + fl_mef[i])\n\n where ``fl_rfi[i]`` is the fluorescence of bead subpopulation ``i`` in\n RFI units and ``fl_mef[i]`` is the corresponding fluorescence in MEF\n units. 
The model includes 3 parameters: ``m`` (slope), ``b``\n (intercept), and ``fl_mef_auto`` (bead autofluorescence). The last term\n is constrained to be greater or equal to zero.\n\n The bead fluorescence model is fit in log space using nonlinear least\n squares regression. In our experience, fitting in log space weights\n the residuals more evenly, whereas fitting in linear space vastly\n overvalues the brighter beads.\n\n A standard curve is constructed by solving for ``fl_mef``. As cell\n samples may not have the same autofluorescence as beads, the bead\n autofluorescence term (``fl_mef_auto``) is omitted from the standard\n curve; the user is expected to use an appropriate white cell sample to\n account for cellular autofluorescence if necessary. The returned\n standard curve mapping fluorescence in RFI units to MEF units is thus\n of the following form::\n\n fl_mef = exp(m*log(fl_rfi) + b)\n\n This is equivalent to::\n\n fl_mef = exp(b) * (fl_rfi**m)\n\n This works for positive ``fl_rfi`` values, but it is undefined for\n ``fl_rfi < 0`` and non-integer ``m`` (general case).\n\n To extend this standard curve to negative values of ``fl_rfi``, we\n define ``s(fl_rfi)`` to be equal to the standard curve above when\n ``fl_rfi >= 0``. Next, we require this function to be odd, that is,\n ``s(fl_rfi) = - s(-fl_rfi)``. This extends the domain to negative\n ``fl_rfi`` values and results in ``s(fl_rfi) < 0`` for any negative\n ``fl_rfi``. Finally, we make ``fl_mef = s(fl_rfi)`` our new\n standard curve. In this way,::\n\n s(fl_rfi) = exp(b) * ( fl_rfi **m), fl_rfi >= 0\n - exp(b) * ((-fl_rfi)**m), fl_rfi < 0\n\n This satisfies the definition of an odd function. In addition,\n ``s(0) = 0``, and ``s(fl_rfi)`` converges to zero when ``fl_rfi -> 0``\n from both sides. Therefore, the function is continuous at\n ``fl_rfi = 0``. 
The definition of ``s(fl_rfi)`` can be expressed more\n conveniently as::\n\n s(fl_rfi) = sign(fl_rfi) * exp(b) * (abs(fl_rfi)**m)\n\n This is the equation implemented.\n\n \"\"\"\n # Check that the input data has consistent dimensions\n if len(fl_rfi) != len(fl_mef):\n raise ValueError(\"fl_rfi and fl_mef have different lengths\")\n # Check that we have at least three points\n if len(fl_rfi) <= 2:\n raise ValueError(\"standard curve model requires at least three \"\n \"values\")\n \n # Initialize parameters\n params = np.zeros(3)\n # Initial guesses:\n # 0: slope found by putting a line through the highest two points.\n # 1: y-intercept found by putting a line through highest two points.\n # 2: bead autofluorescence initialized using the first point.\n params[0] = (np.log(fl_mef[-1]) - np.log(fl_mef[-2])) / \\\n (np.log(fl_rfi[-1]) - np.log(fl_rfi[-2]))\n params[1] = np.log(fl_mef[-1]) - params[0] * np.log(fl_rfi[-1])\n params[2] = np.exp(params[0]*np.log(fl_rfi[0]) + params[1]) - fl_mef[0]\n\n # Error function\n def err_fun(p, x, y):\n return np.sum((np.log(y + p[2]) - ( p[0] * np.log(x) + p[1] ))**2)\n\n # Bead model function\n def fit_fun(p,x):\n return np.exp(p[0] * np.log(x) + p[1]) - p[2]\n\n # RFI-to-MEF standard curve transformation function\n def sc_fun(p,x):\n return np.sign(x) * np.exp(p[1]) * (np.abs(x)**p[0])\n \n # Fit parameters\n err_par = lambda p: err_fun(p, fl_rfi, fl_mef)\n res = minimize(err_par,\n params,\n bounds=((None, None), (None, None), (0, None)),\n options = {'gtol': 1e-10, 'ftol': 1e-10})\n\n # Separate parameters\n beads_params = res.x\n\n # Beads model function\n beads_model = lambda x: fit_fun(beads_params, x)\n\n # Standard curve function\n std_crv = lambda x: sc_fun(beads_params, x)\n\n # Model string representation\n beads_model_str = 'm*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)'\n\n # Parameter names\n beads_params_names = ['m', 'b', 'fl_mef_auto']\n \n return (std_crv,\n beads_model,\n beads_params,\n beads_model_str,\n 
beads_params_names)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nplot a standard curve with the fluorescence of the calibration beads and standard curve.", "response": "def plot_standard_curve(fl_rfi,\n fl_mef,\n beads_model,\n std_crv,\n xscale='linear',\n yscale='linear',\n xlim=None,\n ylim=(1.,1e8)):\n \"\"\"\n Plot a standard curve with fluorescence of calibration beads.\n\n Parameters\n ----------\n fl_rfi : array_like\n Fluorescence of the calibration beads' subpopulations, in RFI\n units.\n fl_mef : array_like\n Fluorescence of the calibration beads' subpopulations, in MEF\n units.\n beads_model : function\n Fluorescence model of the calibration beads.\n std_crv : function\n The standard curve, mapping relative fluorescence (RFI) units to\n MEF units.\n\n Other Parameters\n ----------------\n xscale : str, optional\n Scale of the x axis, either ``linear`` or ``log``.\n yscale : str, optional\n Scale of the y axis, either ``linear`` or ``log``.\n xlim : tuple, optional\n Limits for the x axis.\n ylim : tuple, optional\n Limits for the y axis.\n\n \"\"\"\n # Plot fluorescence of beads populations\n plt.plot(fl_rfi,\n fl_mef,\n 'o',\n label='Beads',\n color=standard_curve_colors[0])\n\n # Generate points in x axis to plot beads model and standard curve.\n if xlim is None:\n xlim = plt.xlim()\n if xscale=='linear':\n xdata = np.linspace(xlim[0], xlim[1], 200)\n elif xscale=='log':\n xdata = np.logspace(np.log10(xlim[0]), np.log10(xlim[1]), 200)\n\n # Plot beads model and standard curve\n plt.plot(xdata,\n beads_model(xdata),\n label='Beads model',\n color=standard_curve_colors[1])\n plt.plot(xdata,\n std_crv(xdata),\n label='Standard curve',\n color=standard_curve_colors[2])\n\n plt.xscale(xscale)\n plt.yscale(yscale)\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.grid(True)\n plt.legend(loc = 'best')"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_transform_fxn(data_beads,\n 
mef_values,\n mef_channels,\n clustering_fxn=clustering_gmm,\n clustering_params={},\n clustering_channels=None,\n statistic_fxn=FlowCal.stats.median,\n statistic_params={},\n selection_fxn=selection_std,\n selection_params={},\n fitting_fxn=fit_beads_autofluorescence,\n fitting_params={},\n verbose=False,\n plot=False,\n plot_dir=None,\n plot_filename=None,\n full_output=False):\n \"\"\"\n Get a transformation function to convert flow cytometry data to MEF.\n\n Parameters\n ----------\n data_beads : FCSData object\n Flow cytometry data describing calibration beads.\n mef_values : sequence of sequences\n Known MEF values for the calibration bead subpopulations, for each\n channel specified in `mef_channels`. The innermost sequences must\n have the same length (the same number of bead subpopulations must\n exist for each channel).\n mef_channels : int, or str, or list of int, or list of str\n Channels for which to generate transformation functions.\n verbose : bool, optional\n Flag specifying whether to print information about step completion\n and warnings.\n plot : bool, optional\n Flag specifying whether to produce diagnostic plots.\n plot_dir : str, optional\n Directory where to save diagnostics plots. Ignored if `plot` is\n False. If ``plot==True`` and ``plot_dir is None``, plot without\n saving.\n plot_filename : str, optional\n Name to use for plot files. If None, use ``str(data_beads)``.\n full_output : bool, optional\n Flag specifying whether to include intermediate results in the\n output. If `full_output` is True, the function returns a\n `MEFOutput` ``namedtuple`` with fields as described below. If\n `full_output` is False, the function only returns the calculated\n transformation function.\n\n Returns\n -------\n transform_fxn : function\n Transformation function to convert flow cytometry data from RFI\n units to MEF. 
This function has the following signature::\n\n data_mef = transform_fxn(data_rfi, channels)\n\n mef_channels : int, or str, or list, only if ``full_output==True``\n Channels on which the transformation function has been generated.\n Directly copied from the `mef_channels` argument.\n\n clustering : dict, only if ``full_output==True``\n Results of the clustering step. The structure of this dictionary\n is::\n\n clustering = {\"labels\": np.array}\n\n A description of each ``\"key\": value`` is given below.\n\n \"labels\" : array\n Array of length ``N``, where ``N`` is the number of events in\n `data_beads`. This array contains labels indicating which\n subpopulation each event has been assigned to by the clustering\n algorithm. Labels range from ``0`` to ``M - 1``, where ``M`` is\n the number of MEF values specified, and therefore the number of\n subpopulations identified by the clustering algorithm.\n\n statistic : dict, only if ``full_output==True``\n Results of the calculation of bead subpopulations' fluorescence.\n The structure of this dictionary is::\n\n statistic = {\"values\": [np.array, ...]}\n\n A description of each ``\"key\": value`` is given below.\n\n \"values\" : list of arrays\n Each array contains the representative fluorescence values of\n all subpopulations, for a specific fluorescence channel from\n `mef_channels`. Therefore, each array has a length equal to the\n number of subpopulations, and the outer list has as many arrays\n as the number of channels in `mef_channels`.\n\n selection : dict, only if ``full_output==True``\n Results of the subpopulation selection step. The structure of this\n dictionary is::\n\n selection = {\"rfi\": [np.array, ...],\n \"mef\": [np.array, ...]}\n\n A description of each ``\"key\": value`` is given below.\n\n \"rfi\" : list of arrays\n Each array contains the fluorescence values of each selected\n subpopulation in RFI units, for a specific fluorescence channel\n from `mef_channels`. 
The outer list has as many arrays as the\n number of channels in `mef_channels`. Because the selection\n step may discard subpopulations, each array has a length less\n than or equal to the total number of subpopulations.\n Furthermore, different arrays in this list may not have the\n same length. However, the length of each array is consistent\n with the corresponding array in ``selection[\"mef\"]`` (see\n below).\n \"mef\" : list of arrays\n Each array contains the fluorescence values of each selected\n subpopulation in MEF units, for a specific fluorescence channel\n from `mef_channels`. The outer list has as many arrays as the\n number of channels in `mef_channels`. Because the selection\n step may discard subpopulations, each array has a length less\n than or equal to the total number of subpopulations.\n Furthermore, different arrays in this list may not have the\n same length. However, the length of each array is consistent\n with the corresponding array in ``selection[\"rfi\"]`` (see\n above).\n\n fitting : dict, only if ``full_output==True``\n Results of the model fitting step. The structure of this dictionary\n is::\n\n selection = {\"std_crv\": [func, ...],\n \"beads_model\": [func, ...],\n \"beads_params\": [np.array, ...],\n \"beads_model_str\": [str, ...],\n \"beads_params_names\": [[], ...]}\n\n A description of each ``\"key\": value`` is given below.\n\n \"std_crv\" : list of functions\n Functions encoding the fitted standard curves, for each channel\n in `mef_channels`. Each element of this list is the ``std_crv``\n output of the fitting function (see required signature of the\n ``fitting_fxn`` optional parameter), after applying it to the\n MEF and RFI fluorescence values of a specific channel from\n `mef_channels` .\n \"beads_model\" : list of functions\n Functions encoding the fluorescence model of the calibration\n beads, for each channel in `mef_channels`. 
Each element of this\n list is the ``beads_model`` output of the fitting function (see\n required signature of the ``fitting_fxn`` optional parameter),\n after applying it to the MEF and RFI fluorescence values of a\n specific channel from `mef_channels` .\n \"beads_params\" : list of arrays\n Fitted parameter values of the bead fluorescence model, for\n each channel in `mef_chanels`. Each element of this list is the\n ``beads_params`` output of the fitting function (see required\n signature of the ``fitting_fxn`` optional parameter), after\n applying it to the MEF and RFI fluorescence values of a\n specific channel from `mef_channels`.\n \"beads_model_str\" : list of str\n String representation of the bead models used, for each channel\n in `mef_channels`. Each element of this list is the\n ``beads_model_str`` output of the fitting function (see\n required signature of the ``fitting_fxn`` optional parameter),\n after applying it to the MEF and RFI fluorescence values of a\n specific channel from `mef_channels` .\n \"beads_params_names\" : list of list\n Names of the parameters given in `beads_params`, for each\n channel in `mef_channels`. 
Each element of this list is the\n ``beads_params_names`` output of the fitting function (see\n required signature of the ``fitting_fxn`` optional parameter),\n after applying it to the MEF and RFI fluorescence values of a\n specific channel from `mef_channels` .\n\n Other parameters\n ----------------\n clustering_fxn : function, optional\n Function used for clustering, or identification of subpopulations.\n Must have the following signature::\n\n labels = clustering_fxn(data, n_clusters, **clustering_params)\n\n where `data` is a NxD FCSData object or numpy array, `n_clusters`\n is the expected number of bead subpopulations, and `labels` is a 1D\n numpy array of length N, assigning each event in `data` to one\n subpopulation.\n\n clustering_params : dict, optional\n Additional keyword parameters to pass to `clustering_fxn`.\n\n clustering_channels : list, optional\n Channels used for clustering. If not specified, use `mef_channels`.\n If more than three channels are specified and `plot` is True, only\n a 3D scatter plot will be produced using the first three channels.\n\n statistic_fxn : function, optional\n Function used to calculate the representative fluorescence of each\n subpopulation. Must have the following signature::\n\n s = statistic_fxn(data, **statistic_params)\n\n where `data` is a 1D FCSData object or numpy array, and `s` is a\n float. Statistical functions from numpy, scipy, or FlowCal.stats\n are valid options.\n\n statistic_params : dict, optional\n Additional keyword parameters to pass to `statistic_fxn`.\n\n selection_fxn : function, optional\n Function to use for bead population selection. Must have the\n following signature::\n\n selected_mask = selection_fxn(data_list, **selection_params)\n\n where `data_list` is a list of FCSData objects, each one containing\n the events of one population, and `selected_mask` is a boolean\n array indicating whether the population has been selected (True) or\n discarded (False). 
If None, don't use a population selection\n procedure.\n\n selection_params : dict, optional\n Additional keyword parameters to pass to `selection_fxn`.\n\n fitting_fxn : function, optional\n Function used to fit the beads fluorescence model and obtain a\n standard curve. Must have the following signature::\n\n std_crv, beads_model, beads_params, \\\\\n beads_model_str, beads_params_names = fitting_fxn(\n fl_rfi, fl_mef, **fitting_params)\n\n where `std_crv` is a function implementing the standard curve,\n `beads_model` is a function implementing the beads fluorescence\n model, `beads_params` is an array containing the fitted parameters\n of the beads model, `beads_model_str` is a string representation\n of the beads model used, `beads_params_names` is a list with the\n parameter names in the same order as they are given in\n `beads_params`, and `fl_rfi` and `fl_mef` are the fluorescence\n values of the beads in RFI units and MEF units, respectively.\n Note that the standard curve and the fitted beads model are not\n necessarily the same.\n\n fitting_params : dict, optional\n Additional keyword parameters to pass to `fitting_fxn`.\n\n Notes\n -----\n The steps involved in generating the MEF transformation function are:\n\n 1. The individual subpopulations of beads are first identified using a\n clustering method of choice. Clustering is performed in all\n specified channels simultaneously.\n 2. The fluorescence of each subpopulation is calculated, for each\n channel in `mef_channels`.\n 3. Some subpopulations are then discarded if they are close to either\n the minimum or the maximum channel range limits. In addition, if the\n MEF value of some subpopulation is unknown (represented as a\n ``np.nan`` in `mef_values`), the whole subpopulation is also\n discarded.\n 4. 
The measured fluorescence of each subpopulation is compared with\n the known MEF values in `mef_values`, and a standard curve function\n is generated using the appropriate MEF model.\n\n At the end, a transformation function is generated using the calculated\n standard curves, `mef_channels`, and ``FlowCal.transform.to_mef()``.\n\n Note that applying the resulting transformation function to other\n flow cytometry samples only yields correct results if they have been\n taken at the same settings as the calibration beads, for all channels\n in `mef_channels`.\n\n Examples\n --------\n Here is a simple application of this function:\n\n >>> transform_fxn = FlowCal.mef.get_transform_fxn(\n ... beads_data,\n ... mef_channels=['FL1', 'FL3'],\n ... mef_values=[np.array([ 0, 646, 1704, 4827,\n ... 15991, 47609, 135896, 273006],\n ... np.array([ 0, 1614, 4035, 12025,\n ... 31896, 95682, 353225, 1077421]],\n ... )\n >>> sample_mef = transform_fxn(data=sample_rfi,\n ... channels=['FL1', 'FL3'])\n\n Here, we first generate ``transform_fxn`` from flow cytometry data\n contained in ``FCSData`` object ``beads_data``, for channels FL1 and\n FL3, using provided MEF values for each one of these channels. In the\n next line, we use the resulting transformation function to transform\n cell sample data in RFI to MEF.\n\n More data about intermediate steps can be obtained with the option\n ``full_output=True``:\n\n >>> get_transform_output = FlowCal.mef.get_transform_fxn(\n ... beads_data,\n ... mef_channels=['FL1', 'FL3'],\n ... mef_values=[np.array([ 0, 646, 1704, 4827,\n ... 15991, 47609, 135896, 273006],\n ... np.array([ 0, 1614, 4035, 12025,\n ... 31896, 95682, 353225, 1077421]],\n ... full_output=True)\n\n In this case, the output ``get_transform_output`` will be a\n `MEFOutput` ``namedtuple`` similar to the following::\n\n FlowCal.mef.MEFOutput(\n transform_fxn=,\n mef_channels=['FL1', 'FL3'],\n clustering={\n 'labels' : [7, 2, 2, ... 
4, 3, 5]\n },\n statistic={\n 'values' : [np.array([ 101, 150, 231, 433,\n 1241, 3106, 7774, 9306]),\n np.array([ 3, 30, 71, 204,\n 704, 2054, 6732, 9912])]\n },\n selection={\n 'rfi' : [np.array([ 101, 150, 231, 433,\n 1241, 3106, 7774]),\n np.array([ 30, 71, 204, 704,\n 2054, 6732])]\n 'mef' : [np.array([ 0, 646, 1704, 4827,\n 15991, 47609, 135896]),\n np.array([ 1614, 4035, 12025, 31896,\n 95682, 353225])]\n },\n fitting={\n 'std_crv' : [>,\n >]\n 'beads_model' : [>,\n >]\n 'beads_params' : [np.array([ 1.09e0, 2.02e0, 1.15e3]),\n np.array([9.66e-1, 4.17e0, 6.63e1])]\n 'beads_model_str' : ['m*log(fl_rfi) + b =\\\n log(fl_mef_auto + fl_mef)',\n 'm*log(fl_rfi) + b =\\\n log(fl_mef_auto + fl_mef)']\n 'beads_params_names' : [['m', 'b', 'fl_mef_auto],\n ['m', 'b', 'fl_mef_auto]]\n },\n )\n\n \"\"\"\n if verbose:\n prev_precision = np.get_printoptions()['precision']\n np.set_printoptions(precision=2)\n\n # Create directory if plot is True\n if plot and plot_dir is not None:\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n # Default plot filename\n if plot_filename is None:\n plot_filename = str(data_beads)\n\n # mef_channels and mef_values should be iterables.\n if hasattr(mef_channels, '__iter__') \\\n and not isinstance(mef_channels, six.string_types):\n mef_channels = list(mef_channels)\n else:\n mef_channels = [mef_channels]\n mef_values = [mef_values]\n\n # Ensure matching number of `mef_values` for all channels (this implies\n # that the calibration beads have the same number of subpopulations for\n # all channels).\n if not np.all([len(mef_values_channel)==len(mef_values[0])\n for mef_values_channel in mef_values]):\n msg = \"innermost sequences of mef_values must have the same length\"\n msg += \" (same number of bead subpopulations must exist for each\"\n msg += \" channel)\"\n raise ValueError(msg)\n\n ###\n # 1. 
Clustering\n ###\n\n # If clustering channels not specified, use channels in mef_channels\n if clustering_channels is None:\n clustering_channels = mef_channels\n\n # Get number of clusters from number of specified MEF values\n n_clusters = len(mef_values[0])\n\n # Run clustering function\n labels = clustering_fxn(data_beads[:, clustering_channels],\n n_clusters,\n **clustering_params)\n\n # Separate events corresponding to each cluster\n unique_labels = np.array(list(set(labels)))\n populations = [data_beads[labels == i] for i in unique_labels]\n\n # Sort populations based on distance to the origin\n population_dist = [np.sum((np.mean(population[:,clustering_channels],\n axis=0))**2)\n for population in populations]\n population_sorted_idx = np.argsort(population_dist)\n populations = [populations[i] for i in population_sorted_idx]\n\n # Print information\n if verbose:\n # Calculate and display percentage of events on each population\n population_count = np.array([population.shape[0]\n for population in populations])\n population_perc = population_count * 100.0 / population_count.sum()\n\n # Print information\n print(\"Step 1: Clustering\")\n print(\" Number of populations to find: {}\".format(n_clusters))\n print(\" Percentage of events in each population:\")\n print(\" \" + str(population_perc))\n\n # Plot\n if plot:\n if plot_dir is not None:\n savefig = '{}/clustering_{}.png'.format(plot_dir, plot_filename)\n else:\n savefig = None\n\n # If used one channel for clustering, make histogram\n if len(clustering_channels) == 1:\n plt.figure(figsize=(8,4))\n FlowCal.plot.hist1d(\n populations,\n channel=clustering_channels[0],\n xscale='logicle',\n bins=256,\n alpha=0.75,\n savefig=savefig)\n\n # If used two channels for clustering, make 2D scatter plot\n elif len(clustering_channels) == 2:\n plt.figure(figsize=(6,4))\n FlowCal.plot.scatter2d(\n populations,\n channels=clustering_channels,\n xscale='logicle',\n yscale='logicle',\n savefig=savefig)\n\n # If used 
three channels or more for clustering, make 3D scatter plot\n # with the first three.\n elif len(clustering_channels) >= 3:\n plt.figure(figsize=(8,6))\n FlowCal.plot.scatter3d_and_projections(\n populations,\n channels=clustering_channels[:3],\n xscale='logicle',\n yscale='logicle',\n zscale='logicle',\n savefig=savefig)\n\n if plot_dir is not None:\n plt.close()\n\n # Initialize lists to acumulate results\n std_crv_res = []\n if full_output:\n stats_values_res = []\n selected_rfi_res = []\n selected_mef_res = []\n beads_model_res = []\n beads_params_res =[]\n beads_model_str_res =[]\n beads_params_names_res =[]\n\n # Iterate through each mef channel\n for mef_channel, mef_values_channel in zip(mef_channels, mef_values):\n\n populations_channel = [population[:, mef_channel]\n for population in populations]\n\n ###\n # 2. Calculate statistics in each subpopulation.\n ###\n\n # Calculate statistics\n stats_values = [statistic_fxn(population, **statistic_params)\n for population in populations_channel]\n stats_values = np.array(stats_values)\n\n # Accumulate results\n if full_output:\n stats_values_res.append(stats_values)\n\n # Print information\n if verbose:\n print(\"({}) Step 2: Population Statistic\".format(mef_channel))\n print(\" Fluorescence of each population (RFI):\")\n print(\" \" + str(stats_values))\n\n ###\n # 3. 
Select populations to be used for fitting\n ###\n\n # Select populations based on selection_fxn\n if selection_fxn is not None:\n selected_mask = selection_fxn(\n [population for population in populations_channel],\n **selection_params)\n else:\n selected_mask = np.ones(n_clusters, dtype=bool)\n\n # Discard values specified as nan in mef_values_channel\n selected_mask = np.logical_and(selected_mask,\n ~np.isnan(mef_values_channel))\n\n # Get selected rfi and mef values\n selected_rfi = stats_values[selected_mask]\n selected_mef = mef_values_channel[selected_mask]\n\n # Accumulate results\n if full_output:\n selected_rfi_res.append(selected_rfi)\n selected_mef_res.append(selected_mef)\n\n # Print information\n if verbose:\n print(\"({}) Step 3: Population Selection\".format(mef_channel))\n print(\" {} populations selected.\".format(len(selected_rfi)))\n print(\" Fluorescence of selected populations (RFI):\")\n print(\" \" + str(selected_rfi))\n print(\" Fluorescence of selected populations (MEF):\")\n print(\" \" + str(selected_mef))\n\n # Plot\n if plot:\n # Get colors for each population. 
Colors are taken from the default\n # colormap in FlowCal.plot, if the population has been selected.\n # Otherwise, the population is displayed in gray.\n color_levels = np.linspace(0, 1, n_clusters)\n colors = [FlowCal.plot.cmap_default(level)\n if selected else (0.6, 0.6, 0.6)\n for selected, level in zip(selected_mask, color_levels)]\n\n # Plot histograms\n plt.figure(figsize=(8,4))\n FlowCal.plot.hist1d(populations,\n channel=mef_channel,\n xscale='logicle',\n bins=256,\n alpha=0.75,\n facecolor=colors)\n\n # Plot a vertical line for each population, with an x coordinate\n # corresponding to their statistic value.\n ylim = plt.ylim()\n for val, color in zip(stats_values, colors):\n plt.plot([val, val], [ylim[0], ylim[1]], color=color)\n plt.ylim(ylim)\n\n # Save and close\n if plot_dir is not None:\n plt.tight_layout()\n plt.savefig('{}/populations_{}_{}.png'.format(plot_dir,\n mef_channel,\n plot_filename),\n dpi=FlowCal.plot.savefig_dpi)\n plt.close()\n\n ###\n # 4. Get standard curve\n ###\n\n # Fit\n fitting_output = fitting_fxn(selected_rfi,\n selected_mef,\n **fitting_params)\n std_crv = fitting_output[0]\n beads_model = fitting_output[1]\n beads_params = fitting_output[2]\n beads_model_str = fitting_output[3]\n beads_params_names = fitting_output[4]\n # Accumulate results\n std_crv_res.append(std_crv)\n if full_output:\n beads_model_res.append(beads_model)\n beads_params_res.append(beads_params)\n beads_model_str_res.append(beads_model_str)\n beads_params_names_res.append(beads_params_names)\n\n # Print information\n if verbose:\n print(\"({}) Step 4: Standard Curve Fitting\".format(mef_channel))\n print(\" Parameters of bead fluorescence model:\")\n print(\" \" + str(beads_params))\n\n # Plot\n if plot:\n # Get channel range\n xlim = populations[0].range(channels=mef_channel)\n # The plot will be made in log scale. 
If the lower limit of the\n # range is zero or less, replace by one or some lower value, such\n # that the range covers at least five decades.\n if xlim[0] <= 0:\n xlim[0] = min(1., xlim[1]/1e5)\n\n # Plot standard curve\n plt.figure(figsize=(6,4))\n plot_standard_curve(selected_rfi,\n selected_mef,\n beads_model,\n std_crv,\n xscale='log',\n yscale='log',\n xlim=xlim)\n plt.xlabel('{} (a.u.)'.format(mef_channel))\n plt.ylabel('{} (MEF)'.format(mef_channel))\n\n # Save if required\n if plot_dir is not None:\n plt.tight_layout()\n plt.savefig('{}/std_crv_{}_{}.png'.format(plot_dir,\n mef_channel,\n plot_filename),\n dpi=FlowCal.plot.savefig_dpi)\n plt.close()\n\n # Make output transformation function\n transform_fxn = functools.partial(FlowCal.transform.to_mef,\n sc_list=std_crv_res,\n sc_channels=mef_channels)\n\n if verbose:\n np.set_printoptions(precision=prev_precision)\n\n if full_output:\n # Clustering results\n clustering_res = {}\n clustering_res['labels'] = labels\n # Population stats results\n statistic_res = {}\n statistic_res['values'] = stats_values_res\n # Population selection results\n selection_res = {}\n selection_res['rfi'] = selected_rfi_res\n selection_res['mef'] = selected_mef_res\n # Fitting results\n fitting_res = {}\n fitting_res['std_crv'] = std_crv_res\n fitting_res['beads_model'] = beads_model_res\n fitting_res['beads_params'] = beads_params_res\n fitting_res['beads_model_str'] = beads_model_str_res\n fitting_res['beads_params_names'] = beads_params_names_res\n\n # Make namedtuple\n fields = ['mef_channels',\n 'transform_fxn',\n 'clustering',\n 'statistic',\n 'selection',\n 'fitting']\n MEFOutput = collections.namedtuple('MEFOutput', fields)\n out = MEFOutput(mef_channels=mef_channels,\n transform_fxn=transform_fxn,\n clustering=clustering_res,\n statistic=statistic_res,\n selection=selection_res,\n fitting=fitting_res)\n return out\n else:\n return transform_fxn", "response": "Returns a function to convert flow cytometry data to MEF."} 
{"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef read_table(filename, sheetname, index_col=None):\n # Catch sheetname as list or None\n if sheetname is None or \\\n (hasattr(sheetname, '__iter__') \\\n and not isinstance(sheetname, six.string_types)):\n raise TypeError(\"sheetname should specify a single sheet\")\n\n # Load excel table using pandas\n # Parameter specifying sheet name is slightly different depending on pandas'\n # version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n table = pd.read_excel(filename,\n sheetname=sheetname,\n index_col=index_col)\n else:\n table = pd.read_excel(filename,\n sheet_name=sheetname,\n index_col=index_col)\n # Eliminate rows whose index are null\n if index_col is not None:\n table = table[pd.notnull(table.index)]\n # Check for duplicated rows\n if table.index.has_duplicates:\n raise ValueError(\"sheet {} on file {} contains duplicated values \"\n \"for column {}\".format(sheetname, filename, index_col))\n\n return table", "response": "Reads an Excel table into a pandas DataFrame."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef write_workbook(filename, table_list, column_width=None):\n # Modify default header format\n # Pandas' default header format is bold text with thin borders. 
Here we\n # use bold text only, without borders.\n # The header style structure is in pd.core.format in pandas<=0.18.0,\n # pd.formats.format in 0.18.1<=pandas<0.20, and pd.io.formats.excel in\n # pandas>=0.20.\n # Also, wrap in a try-except block in case style structure is not found.\n format_module_found = False\n try:\n # Get format module\n if packaging.version.parse(pd.__version__) \\\n <= packaging.version.parse('0.18'):\n format_module = pd.core.format\n elif packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.20'):\n format_module = pd.formats.format\n else:\n import pandas.io.formats.excel as format_module\n # Save previous style, replace, and indicate that previous style should\n # be restored at the end\n old_header_style = format_module.header_style\n format_module.header_style = {\"font\": {\"bold\": True}}\n format_module_found = True\n except AttributeError as e:\n pass\n\n # Generate output writer object\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n\n # Write tables\n for sheet_name, df in table_list:\n # Convert index names to regular columns\n df = df.reset_index()\n # Write to an Excel sheet\n df.to_excel(writer, sheet_name=sheet_name, index=False)\n # Set column width\n if column_width is None:\n for i, (col_name, column) in enumerate(six.iteritems(df)):\n # Get the maximum number of characters in a column\n max_chars_col = column.astype(str).str.len().max()\n max_chars_col = max(len(col_name), max_chars_col)\n # Write width\n writer.sheets[sheet_name].set_column(\n i,\n i,\n width=1.*max_chars_col)\n else:\n writer.sheets[sheet_name].set_column(\n 0,\n len(df.columns) - 1,\n width=column_width)\n\n # Write excel file\n writer.save()\n\n # Restore previous header format\n if format_module_found:\n format_module.header_style = old_header_style", "response": "Writes an Excel workbook from a list of tables."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nprocess calibration bead 
samples, as specified by an input table. This function processes the entries in `beads_table`. For each row, the function does the following: - Load the FCS file specified in the field \"File Path\". - Transform the forward scatter/side scatter and fluorescence channels to RFI - Remove the 250 first and 100 last events. - Remove saturated events in the forward scatter and side scatter channels. - Apply density gating on the forward scatter/side scatter channels. - Generate a standard curve transformation function, for each fluorescence channel in which the associated MEF values are specified. - Generate forward/side scatter density plots and fluorescence histograms, and plots of the clustering and fitting steps of standard curve generation, if `plot` = True. Names of forward/side scatter and fluorescence channels are taken from `instruments_table`. Parameters ---------- beads_table : DataFrame Table specifying beads samples to be processed. For more information about the fields required in this table, please consult the module's documentation. instruments_table : DataFrame Table specifying instruments. For more information about the fields required in this table, please consult the module's documentation. base_dir : str, optional Directory from where all the other paths are specified. verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. plot_dir : str, optional Directory relative to `base_dir` into which plots are saved. If `plot` is False, this parameter is ignored. If ``plot==True`` and ``plot_dir is None``, plot without saving. full_output : bool, optional Flag indicating whether to include an additional output, containing intermediate results from the generation of the MEF transformation functions. 
get_transform_fxn_kwargs : dict, optional Additional parameters passed directly to internal ``mef.get_transform_fxn()`` function call. Returns ------- beads_samples : list of FCSData objects A list of processed, gated, and transformed samples, as specified in `beads_table`, in the order of ``beads_table.index``. mef_transform_fxns : OrderedDict A dictionary of MEF transformation functions, indexed by ``beads_table.index``. mef_outputs : list, only if ``full_output==True`` A list with intermediate results of the generation of the MEF transformation functions. For every entry in `beads_table`, :func:`FlowCal.mef.get_transform_fxn()` is called on the corresponding processed and gated beads sample with ``full_output=True``, and the full output (a `MEFOutput` ``namedtuple``) is appended to `mef_outputs`. Please refer to the output section of :func:`FlowCal.mef.get_transform_fxn()`'s documentation for more information.", "response": "def process_beads_table(beads_table,\n instruments_table,\n base_dir=\".\",\n verbose=False,\n plot=False,\n plot_dir=None,\n full_output=False,\n get_transform_fxn_kwargs={}):\n \"\"\"\n Process calibration bead samples, as specified by an input table.\n\n This function processes the entries in `beads_table`. 
For each row, the\n function does the following:\n - Load the FCS file specified in the field \"File Path\".\n - Transform the forward scatter/side scatter and fluorescence\n channels to RFI\n - Remove the 250 first and 100 last events.\n - Remove saturated events in the forward scatter and side scatter\n channels.\n - Apply density gating on the forward scatter/side scatter\n channels.\n - Generate a standard curve transformation function, for each\n fluorescence channel in which the associated MEF values are\n specified.\n - Generate forward/side scatter density plots and fluorescence\n histograms, and plots of the clustering and fitting steps of\n standard curve generation, if `plot` = True.\n\n Names of forward/side scatter and fluorescence channels are taken from\n `instruments_table`.\n\n Parameters\n ----------\n beads_table : DataFrame\n Table specifying beads samples to be processed. For more\n information about the fields required in this table, please consult\n the module's documentation.\n instruments_table : DataFrame\n Table specifying instruments. For more information about the fields\n required in this table, please consult the module's documentation.\n base_dir : str, optional\n Directory from where all the other paths are specified.\n verbose : bool, optional\n Whether to print information messages during the execution of this\n function.\n plot : bool, optional\n Whether to generate and save density/histogram plots of each\n sample, and each beads sample.\n plot_dir : str, optional\n Directory relative to `base_dir` into which plots are saved. If\n `plot` is False, this parameter is ignored. 
If ``plot==True`` and\n ``plot_dir is None``, plot without saving.\n full_output : bool, optional\n Flag indicating whether to include an additional output, containing\n intermediate results from the generation of the MEF transformation\n functions.\n get_transform_fxn_kwargs : dict, optional\n Additional parameters passed directly to internal\n ``mef.get_transform_fxn()`` function call.\n\n Returns\n -------\n beads_samples : list of FCSData objects\n A list of processed, gated, and transformed samples, as specified\n in `beads_table`, in the order of ``beads_table.index``.\n mef_transform_fxns : OrderedDict\n A dictionary of MEF transformation functions, indexed by\n ``beads_table.index``.\n mef_outputs : list, only if ``full_output==True``\n A list with intermediate results of the generation of the MEF\n transformation functions. For every entry in `beads_table`,\n :func:`FlowCal.mef.get_transform_fxn()` is called on the\n corresponding processed and gated beads sample with\n ``full_output=True``, and the full output (a `MEFOutput`\n ``namedtuple``) is appended to `mef_outputs`. 
Please refer to the\n output section of :func:`FlowCal.mef.get_transform_fxn()`'s\n documentation for more information.\n\n \"\"\"\n # Initialize output variables\n beads_samples = []\n mef_transform_fxns = collections.OrderedDict()\n mef_outputs = []\n\n # Return empty structures if beads table is empty\n if beads_table.empty:\n if full_output:\n return beads_samples, mef_transform_fxns, mef_outputs\n else:\n return beads_samples, mef_transform_fxns\n\n if verbose:\n msg = \"Processing Beads table ({} entries)\".format(len(beads_table))\n print(\"\")\n print(msg)\n print(\"=\"*len(msg))\n\n # Check that plotting directory exist, create otherwise\n if plot and plot_dir is not None \\\n and not os.path.exists(os.path.join(base_dir, plot_dir)):\n os.makedirs(os.path.join(base_dir, plot_dir))\n\n # Extract header and channel names for which MEF values are specified.\n headers = list(beads_table.columns)\n mef_headers_all = [h for h in headers if re_mef_values.match(h)]\n mef_channels_all = [re_mef_values.match(h).group(1)\n for h in mef_headers_all]\n\n # Iterate through table\n # We will look for a ExcelUIException on each iteration. 
If an exception\n # is caught, it will be stored in beads_samples.\n for beads_id, beads_row in beads_table.iterrows():\n try:\n ###\n # Instrument Data\n ###\n # Get the appropriate row in the instrument table\n instruments_row = instruments_table.loc[beads_row['Instrument ID']]\n # Scatter channels: Foward Scatter, Side Scatter\n sc_channels = [instruments_row['Forward Scatter Channel'],\n instruments_row['Side Scatter Channel'],\n ]\n # Fluorescence channels is a comma-separated list\n fl_channels = instruments_row['Fluorescence Channels'].split(',')\n fl_channels = [s.strip() for s in fl_channels]\n\n ###\n # Beads Data\n ###\n if verbose:\n print(\"\\nBeads ID {}...\".format(beads_id))\n print(\"Loading file \\\"{}\\\"...\".format(beads_row['File Path']))\n\n # Attempt to open file\n filename = os.path.join(base_dir, beads_row['File Path'])\n try:\n beads_sample = FlowCal.io.FCSData(filename)\n except IOError:\n raise ExcelUIException(\"file \\\"{}\\\" not found\".format(\n beads_row['File Path']))\n # Check that the number of events is greater than 400\n if beads_sample.shape[0] < 400:\n raise ExcelUIException(\"number of events is lower than 400\")\n\n ###\n # Transform\n ###\n if verbose:\n print(\"Performing data transformation...\")\n # Transform FSC/SSC and fluorescence channels to linear scale\n beads_sample = FlowCal.transform.to_rfi(beads_sample,\n sc_channels + fl_channels)\n\n # Parse clustering channels data\n cluster_channels = beads_row['Clustering Channels'].split(',')\n cluster_channels = [cc.strip() for cc in cluster_channels]\n\n ###\n # Gate\n ###\n if verbose:\n print(\"Performing gating...\")\n # Remove first and last events. Transients in fluidics can make the\n # first few and last events slightly different from the rest.\n beads_sample_gated = FlowCal.gate.start_end(beads_sample,\n num_start=250,\n num_end=100)\n # Remove saturating events in forward/side scatter, if the FCS data\n # type is integer. 
The value of a saturating event is taken\n # automatically from `beads_sample_gated.range`.\n if beads_sample_gated.data_type == 'I':\n beads_sample_gated = FlowCal.gate.high_low(\n beads_sample_gated,\n channels=sc_channels)\n # Density gating\n try:\n beads_sample_gated, __, gate_contour = FlowCal.gate.density2d(\n data=beads_sample_gated,\n channels=sc_channels,\n gate_fraction=beads_row['Gate Fraction'],\n xscale='logicle',\n yscale='logicle',\n sigma=5.,\n full_output=True)\n except ValueError as ve:\n raise ExcelUIException(ve.message)\n\n # Plot forward/side scatter density plot and fluorescence histograms\n if plot:\n if verbose:\n print(\"Plotting density plot and histogram...\")\n # Density plot parameters\n density_params = {}\n density_params['mode'] = 'scatter'\n density_params[\"title\"] = \"{} ({:.1f}% retained)\".format(\n beads_id,\n beads_sample_gated.shape[0] * 100. / beads_sample.shape[0])\n density_params['xscale'] = 'logicle'\n density_params['yscale'] = 'logicle'\n # Beads have a tight distribution, so axis limits will be set\n # from 0.75 decades below the 5th percentile to 0.75 decades\n # above the 95th percentile.\n density_params['xlim'] = \\\n (np.percentile(beads_sample_gated[:, sc_channels[0]],\n 5) / (10**0.75),\n np.percentile(beads_sample_gated[:, sc_channels[0]],\n 95) * (10**0.75),\n )\n density_params['ylim'] = \\\n (np.percentile(beads_sample_gated[:, sc_channels[1]],\n 5) / (10**0.75),\n np.percentile(beads_sample_gated[:, sc_channels[1]],\n 95) * (10**0.75),\n )\n # Beads have a tight distribution, so less smoothing should be\n # applied for visualization\n density_params['sigma'] = 5.\n # Histogram plot parameters\n hist_params = {'xscale': 'logicle'}\n # Plot\n if plot_dir is not None:\n figname = os.path.join(\n base_dir,\n plot_dir,\n \"density_hist_{}.png\".format(beads_id))\n else:\n figname = None\n plt.figure(figsize=(6,4))\n FlowCal.plot.density_and_hist(\n beads_sample,\n beads_sample_gated,\n 
density_channels=sc_channels,\n hist_channels=cluster_channels,\n gate_contour=gate_contour,\n density_params=density_params,\n hist_params=hist_params,\n savefig=figname)\n\n ###\n # Process MEF values\n ###\n # For each fluorescence channel, check whether a list of known MEF\n # values of the bead subpopulations is provided in `beads_row`. This\n # involves checking that a column named \"[channel] MEF Values\"\n # exists and is not empty. If so, store the name of the channel in\n # `mef_channels`, and the specified MEF values in `mef_values`.\n ###\n mef_values = []\n mef_channels = []\n for fl_channel in fl_channels:\n if fl_channel in mef_channels_all:\n # Get header from channel name\n mef_header = \\\n mef_headers_all[mef_channels_all.index(fl_channel)]\n # Extract text. If empty, ignore.\n mef_str = beads_row[mef_header]\n if pd.isnull(mef_str):\n continue\n # Save channel name\n mef_channels.append(fl_channel)\n # Parse list of values\n mef = mef_str.split(',')\n mef = [int(e) if e.strip().isdigit() else np.nan\n for e in mef]\n mef_values.append(mef)\n\n # Ensure matching number of `mef_values` for all channels (this\n # implies that the calibration beads have the same number of\n # subpopulations for all channels).\n if mef_values:\n if not np.all([len(mef_values_channel)==len(mef_values[0])\n for mef_values_channel in mef_values]):\n raise ExcelUIException(\"Must specify the same number of\"\n + \" MEF Values for each channel.\"\n + \" Use 'None' to instruct FlowCal\"\n + \" to ignore a detected\"\n + \" subpopulation.\")\n mef_values = np.array(mef_values)\n\n # Obtain standard curve transformation\n if mef_channels:\n if verbose:\n if len(mef_channels) == 1:\n print(\"Calculating standard curve for channel {}...\" \\\n .format(mef_channels[0]))\n else:\n print(\"Calculating standard curve for channels {}...\" \\\n .format(\", \".join(mef_channels)))\n\n mef_output = FlowCal.mef.get_transform_fxn(\n beads_sample_gated,\n mef_values,\n 
mef_channels=mef_channels,\n clustering_channels=cluster_channels,\n verbose=False,\n plot=plot,\n plot_filename=beads_id,\n plot_dir=os.path.join(base_dir, plot_dir) \\\n if plot_dir is not None else None,\n full_output=full_output,\n **get_transform_fxn_kwargs)\n\n if full_output:\n mef_transform_fxn = mef_output.transform_fxn\n else:\n mef_transform_fxn = mef_output\n\n else:\n mef_transform_fxn = None\n mef_output = None\n\n except ExcelUIException as e:\n # Print Exception message\n if verbose:\n print(\"ERROR: {}\".format(str(e)))\n # Append exception to beads_samples array, and None to everything\n # else\n beads_samples.append(e)\n mef_transform_fxns[beads_id] = None\n if full_output:\n mef_outputs.append(None)\n\n else:\n # If no errors were found, store results\n beads_samples.append(beads_sample_gated)\n mef_transform_fxns[beads_id] = mef_transform_fxn\n if full_output:\n mef_outputs.append(mef_output)\n\n\n if full_output:\n return beads_samples, mef_transform_fxns, mef_outputs\n else:\n return beads_samples, mef_transform_fxns"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef process_samples_table(samples_table,\n instruments_table,\n mef_transform_fxns=None,\n beads_table=None,\n base_dir=\".\",\n verbose=False,\n plot=False,\n plot_dir=None):\n \"\"\"\n Process flow cytometry samples, as specified by an input table.\n\n The function processes each entry in `samples_table`, and does the\n following:\n - Load the FCS file specified in the field \"File Path\".\n - Transform the forward scatter/side scatter to RFI.\n - Transform the fluorescence channels to the units specified in the\n column \" Units\".\n - Remove the 250 first and 100 last events.\n - Remove saturated events in the forward scatter and side scatter\n channels.\n - Apply density gating on the forward scatter/side scatter\n channels.\n - Plot combined forward/side scatter density plots and fluorescence\n historgrams, if 
`plot` = True.\n\n Names of forward/side scatter and fluorescence channels are taken from\n `instruments_table`.\n\n Parameters\n ----------\n samples_table : DataFrame\n Table specifying samples to be processed. For more information\n about the fields required in this table, please consult the\n module's documentation.\n instruments_table : DataFrame\n Table specifying instruments. For more information about the fields\n required in this table, please consult the module's documentation.\n mef_transform_fxns : dict or OrderedDict, optional\n Dictionary containing MEF transformation functions. If any entry\n in `samples_table` requires transformation to MEF, a key: value\n pair must exist in mef_transform_fxns, with the key being equal to\n the contents of field \"Beads ID\".\n beads_table : DataFrame, optional\n Table specifying beads samples used to generate\n `mef_transform_fxns`. This is used to check if a beads sample was\n taken at the same acquisition settings as a sample to be\n transformed to MEF. For any beads sample and channel for which a\n MEF transformation function has been generated, the following\n fields should be populated: `` Amp. Type`` and\n `` Detector Volt``. If `beads_table` is not specified, no\n checking will be performed.\n base_dir : str, optional\n Directory from where all the other paths are specified.\n verbose : bool, optional\n Whether to print information messages during the execution of this\n function.\n plot : bool, optional\n Whether to generate and save density/histogram plots of each\n sample, and each beads sample.\n plot_dir : str, optional\n Directory relative to `base_dir` into which plots are saved. If\n `plot` is False, this parameter is ignored. 
If ``plot==True`` and\n ``plot_dir is None``, plot without saving.\n\n Returns\n -------\n samples : list of FCSData objects\n A list of processed, gated, and transformed samples, as specified\n in `samples_table`, in the order of ``samples_table.index``.\n\n \"\"\"\n # Initialize output variable\n samples = []\n\n # Return empty list if samples table is empty\n if samples_table.empty:\n return samples\n\n if verbose:\n msg = \"Processing Samples table ({} entries)\".format(len(samples_table))\n print(\"\")\n print(msg)\n print(\"=\"*len(msg))\n\n # Check that plotting directory exist, create otherwise\n if plot and plot_dir is not None \\\n and not os.path.exists(os.path.join(base_dir, plot_dir)):\n os.makedirs(os.path.join(base_dir, plot_dir))\n\n # Extract header and channel names for which units are specified.\n headers = list(samples_table.columns)\n report_headers_all = [h for h in headers if re_units.match(h)]\n report_channels_all = [re_units.match(h).group(1)\n for h in report_headers_all]\n\n # Iterate through table\n # We will look for a ExcelUIException on each iteration. 
If an exception\n # is caught, it will be stored in beads_samples.\n for sample_id, sample_row in samples_table.iterrows():\n try:\n ###\n # Instrument Data\n ###\n # Get the appropriate row in the instrument table\n instruments_row = instruments_table.loc[sample_row['Instrument ID']]\n # Scatter channels: Foward Scatter, Side Scatter\n sc_channels = [instruments_row['Forward Scatter Channel'],\n instruments_row['Side Scatter Channel'],\n ]\n # Fluorescence channels is a comma-separated list\n fl_channels = instruments_row['Fluorescence Channels'].split(',')\n fl_channels = [s.strip() for s in fl_channels]\n\n ###\n # Sample Data\n ###\n if verbose:\n print(\"\\nSample ID {}...\".format(sample_id))\n print(\"Loading file \\\"{}\\\"...\".format(sample_row['File Path']))\n\n # Attempt to open file\n filename = os.path.join(base_dir, sample_row['File Path'])\n try:\n sample = FlowCal.io.FCSData(filename)\n except IOError:\n raise ExcelUIException(\"file \\\"{}\\\" not found\".format(\n sample_row['File Path']))\n # Check that the number of events is greater than 400\n if sample.shape[0] < 400:\n raise ExcelUIException(\"number of events is lower than 400\")\n\n ###\n # Transform\n ###\n if verbose:\n print(\"Performing data transformation...\")\n # Transform FSC/SSC to linear scale\n sample = FlowCal.transform.to_rfi(sample, sc_channels)\n\n # Parse fluorescence channels in which to transform\n report_channels = []\n report_units = []\n for fl_channel in fl_channels:\n if fl_channel in report_channels_all:\n # Get header from channel name\n report_header = report_headers_all[\n report_channels_all.index(fl_channel)]\n # Extract text. 
If empty, ignore.\n units_str = sample_row[report_header]\n if pd.isnull(units_str):\n continue\n # Decide what transformation to perform\n units = units_str.strip()\n if units.lower() == 'channel':\n units_label = \"Channel Number\"\n\n elif units.lower() == 'rfi':\n units_label = \"Relative Fluorescence Intensity, RFI\"\n sample = FlowCal.transform.to_rfi(sample, fl_channel)\n elif units.lower() == 'a.u.' or units.lower() == 'au':\n units_label = \"Arbitrary Units, a.u.\"\n sample = FlowCal.transform.to_rfi(sample, fl_channel)\n\n elif units.lower() == 'mef':\n units_label = \"Molecules of Equivalent Fluorophore, MEF\"\n # Check if transformation function is available\n if mef_transform_fxns[sample_row['Beads ID']] is None:\n raise ExcelUIException(\"MEF transformation \"\n \"function not available\")\n\n # If beads_table is available, check if the same\n # settings have been used to acquire the corresponding\n # beads sample\n if beads_table is not None:\n beads_row = beads_table.loc[sample_row['Beads ID']]\n # Instrument\n beads_iid = beads_row['Instrument ID']\n if beads_iid != sample_row['Instrument ID']:\n raise ExcelUIException(\"Instruments for \"\n \"acquisition of beads and samples are not \"\n \"the same (beads {}'s instrument: {}, \"\n \"sample's instrument: {})\".format(\n sample_row['Beads ID'],\n beads_iid,\n sample_row['Instrument ID']))\n # Amplification type\n beads_at = beads_row['{} Amp. Type'. \\\n format(fl_channel)]\n if sample.amplification_type(fl_channel)[0]:\n sample_at = \"Log\"\n else:\n sample_at = \"Linear\"\n if beads_at != sample_at:\n raise ExcelUIException(\"Amplification type for \"\n \"acquisition of beads and samples in \"\n \"channel {} are not the same (beads {}'s \"\n \"amplification: {}, sample's \"\n \"amplification: {})\".format(\n fl_channel,\n sample_row['Beads ID'],\n beads_at,\n sample_at))\n # Detector voltage\n beads_dv = beads_row['{} Detector Volt.'. 
\\\n format(fl_channel)]\n if sample.detector_voltage(fl_channel) is not None \\\n and beads_dv != sample.detector_voltage(\n fl_channel):\n raise ExcelUIException(\"Detector voltage for \"\n \"acquisition of beads and samples in \"\n \"channel {} are not the same (beads {}'s \"\n \"detector voltage: {}, sample's \"\n \"detector voltage: {})\".format(\n fl_channel,\n sample_row['Beads ID'],\n beads_dv,\n sample.detector_voltage(fl_channel)))\n\n # First, transform to RFI\n sample = FlowCal.transform.to_rfi(sample, fl_channel)\n # Attempt to transform to MEF\n # Transformation function raises a ValueError if a\n # standard curve does not exist for a channel\n try:\n sample = mef_transform_fxns[sample_row['Beads ID']](\n sample,\n fl_channel)\n except ValueError:\n raise ExcelUIException(\"no standard curve for \"\n \"channel {}\".format(fl_channel))\n else:\n raise ExcelUIException(\"units \\\"{}\\\" not recognized\". \\\n format(units, sample_id))\n\n # Register that reporting in this channel must be done\n report_channels.append(fl_channel)\n report_units.append(units_label)\n\n ###\n # Gate\n ###\n if verbose:\n print(\"Performing gating...\")\n # Remove first and last events. Transients in fluidics can make the\n # first few and last events slightly different from the rest.\n sample_gated = FlowCal.gate.start_end(sample,\n num_start=250,\n num_end=100)\n # Remove saturating events in forward/side scatter, and fluorescent\n # channels to report, if the FCS data type is integer. 
The value of\n # a saturating event is taken automatically from\n # `sample_gated.range`.\n if sample_gated.data_type == 'I':\n sample_gated = FlowCal.gate.high_low(\n sample_gated,\n sc_channels + report_channels)\n # Density gating\n try:\n sample_gated, __, gate_contour = FlowCal.gate.density2d(\n data=sample_gated,\n channels=sc_channels,\n gate_fraction=sample_row['Gate Fraction'],\n xscale='logicle',\n yscale='logicle',\n full_output=True)\n except ValueError as ve:\n raise ExcelUIException(ve.message)\n\n # Plot forward/side scatter density plot and fluorescence histograms\n if plot:\n if verbose:\n print(\"Plotting density plot and histogram...\")\n # Density plot parameters\n density_params = {}\n density_params['mode'] = 'scatter'\n density_params[\"title\"] = \"{} ({:.1f}% retained)\".format(\n sample_id,\n sample_gated.shape[0] * 100. / sample.shape[0])\n density_params['xscale'] = 'logicle'\n density_params['yscale'] = 'logicle'\n # Histogram plot parameters\n hist_params = []\n for rc, ru in zip(report_channels, report_units):\n param = {}\n param['xlabel'] = '{} ({})'.format(rc, ru)\n # Only channel numbers are plotted in linear scale\n if (ru == 'Channel Number'):\n param['xscale'] = 'linear'\n else:\n param['xscale'] = 'logicle'\n hist_params.append(param)\n\n # Plot\n if plot_dir is not None:\n figname = os.path.join(\n base_dir,\n plot_dir,\n \"{}.png\".format(sample_id))\n else:\n figname = None\n FlowCal.plot.density_and_hist(\n sample,\n sample_gated,\n gate_contour=gate_contour,\n density_channels=sc_channels,\n density_params=density_params,\n hist_channels=report_channels,\n hist_params=hist_params,\n savefig=figname)\n\n except ExcelUIException as e:\n # Print Exception message\n if verbose:\n print(\"ERROR: {}\".format(str(e)))\n # Append exception to samples array\n samples.append(e)\n\n else:\n # If no errors were found, store results\n samples.append(sample_gated)\n\n return samples", "response": "This function processes the samples 
table and returns a new flow cytometry tree."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef add_beads_stats(beads_table, beads_samples, mef_outputs=None):\n # The index name is not preserved if beads_table is empty.\n # Save the index name for later\n beads_table_index_name = beads_table.index.name\n\n # Add per-row info\n notes = []\n n_events = []\n acq_time = []\n for beads_sample in beads_samples:\n # Check if sample is an exception, otherwise assume it's an FCSData\n if isinstance(beads_sample, ExcelUIException):\n # Print error message\n notes.append(\"ERROR: {}\".format(str(beads_sample)))\n n_events.append(np.nan)\n acq_time.append(np.nan)\n else:\n notes.append('')\n n_events.append(beads_sample.shape[0])\n acq_time.append(beads_sample.acquisition_time)\n\n beads_table['Analysis Notes'] = notes\n beads_table['Number of Events'] = n_events\n beads_table['Acquisition Time (s)'] = acq_time\n\n # List of channels that require stats columns\n headers = list(beads_table.columns)\n stats_headers = [h for h in headers if re_mef_values.match(h)]\n stats_channels = [re_mef_values.match(h).group(1) for h in stats_headers]\n\n # Iterate through channels\n for header, channel in zip(stats_headers, stats_channels):\n # Add empty columns to table\n beads_table[channel + ' Detector Volt.'] = np.nan\n beads_table[channel + ' Amp. Type'] = \"\"\n if mef_outputs:\n beads_table[channel + ' Beads Model'] = \"\"\n beads_table[channel + ' Beads Params. Names'] = \"\"\n beads_table[channel + ' Beads Params. Values'] = \"\"\n\n # Iterate\n for i, row_id in enumerate(beads_table.index):\n # If error, skip\n if isinstance(beads_samples[i], ExcelUIException):\n continue\n # If MEF values are specified, calculate stats. 
If not, leave empty.\n if pd.notnull(beads_table[header][row_id]):\n\n # Detector voltage\n # Dataframes, such as beads_table, are modified differently\n # depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n beads_table.set_value(\n row_id,\n channel + ' Detector Volt.',\n beads_samples[i].detector_voltage(channel))\n else:\n beads_table.at[row_id, channel + ' Detector Volt.'] = \\\n beads_samples[i].detector_voltage(channel)\n\n\n # Amplification type\n if beads_samples[i].amplification_type(channel)[0]:\n amplification_type = \"Log\"\n else:\n amplification_type = \"Linear\"\n # Dataframes, such as beads_table, are modified differently\n # depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n beads_table.set_value(row_id,\n channel + ' Amp. Type',\n amplification_type)\n else:\n beads_table.at[row_id, channel + ' Amp. Type'] = \\\n amplification_type\n\n\n # Bead model and parameters\n # Only populate if mef_outputs has been provided\n if mef_outputs:\n # Try to find the current channel among the mef'd channels.\n # If successful, extract bead fitted parameters.\n try:\n mef_channel_index = mef_outputs[i]. \\\n mef_channels.index(channel)\n except ValueError:\n pass\n else:\n # Bead model\n beads_model_str = mef_outputs[i]. \\\n fitting['beads_model_str'][mef_channel_index]\n # Dataframes, such as beads_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n beads_table.set_value(row_id,\n channel + ' Beads Model',\n beads_model_str)\n else:\n beads_table.at[row_id, channel + ' Beads Model'] = \\\n beads_model_str\n\n # Bead parameter names\n params_names = mef_outputs[i]. 
\\\n fitting['beads_params_names'][mef_channel_index]\n params_names_str = \", \".join([str(p)\n for p in params_names])\n # Dataframes, such as beads_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n beads_table.set_value(\n row_id,\n channel + ' Beads Params. Names',\n params_names_str)\n else:\n beads_table.at[\n row_id,\n channel + ' Beads Params. Names'] = \\\n params_names_str\n\n # Bead parameter values\n params = mef_outputs[i]. \\\n fitting['beads_params'][mef_channel_index]\n params_str = \", \".join([str(p) for p in params])\n # Dataframes, such as beads_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n beads_table.set_value(\n row_id,\n channel + ' Beads Params. Values',\n params_str)\n else:\n beads_table.at[\n row_id,\n channel + ' Beads Params. Values'] = \\\n params_str\n\n # Restore index name if table is empty\n if len(beads_table) == 0:\n beads_table.index.name = beads_table_index_name", "response": "Add stats fields to beads table."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nadd statistics fields to the samples table.", "response": "def add_samples_stats(samples_table, samples):\n \"\"\"\n Add stats fields to samples table.\n\n The following information is added to each row:\n\n - Notes (warnings, errors) resulting from the analysis\n - Number of Events\n - Acquisition Time (s)\n\n The following information is added for each row, for each channel in\n which fluorescence units have been specified:\n\n - Detector voltage (gain)\n - Amplification type\n - Mean\n - Geometric Mean\n - Median\n - Mode\n - Standard Deviation\n - Coefficient of Variation (CV)\n - Geometric Standard Deviation\n - Geometric Coefficient of Variation\n - Inter-Quartile Range\n - Robust Coefficient of Variation (RCV)\n\n Parameters\n 
----------\n samples_table : DataFrame\n Table specifying samples to analyze. For more information about the\n fields required in this table, please consult the module's\n documentation.\n samples : list\n FCSData objects from which to calculate statistics. ``samples[i]``\n should correspond to ``samples_table.iloc[i]``.\n\n Notes\n -----\n Geometric statistics (geometric mean, standard deviation, and geometric\n coefficient of variation) are defined only for positive data. If there\n are negative events in any relevant channel of any member of `samples`,\n geometric statistics will only be calculated on the positive events,\n and a warning message will be written to the \"Analysis Notes\" field.\n\n \"\"\"\n # The index name is not preserved if samples_table is empty.\n # Save the index name for later\n samples_table_index_name = samples_table.index.name\n\n # Add per-row info\n notes = []\n n_events = []\n acq_time = []\n for sample in samples:\n # Check if sample is an exception, otherwise assume it's an FCSData\n if isinstance(sample, ExcelUIException):\n # Print error message\n notes.append(\"ERROR: {}\".format(str(sample)))\n n_events.append(np.nan)\n acq_time.append(np.nan)\n else:\n notes.append('')\n n_events.append(sample.shape[0])\n acq_time.append(sample.acquisition_time)\n\n samples_table['Analysis Notes'] = notes\n samples_table['Number of Events'] = n_events\n samples_table['Acquisition Time (s)'] = acq_time\n\n # List of channels that require stats columns\n headers = list(samples_table.columns)\n stats_headers = [h for h in headers if re_units.match(h)]\n stats_channels = [re_units.match(h).group(1) for h in stats_headers]\n\n # Iterate through channels\n for header, channel in zip(stats_headers, stats_channels):\n # Add empty columns to table\n samples_table[channel + ' Detector Volt.'] = np.nan\n samples_table[channel + ' Amp. Type'] = \"\"\n samples_table[channel + ' Mean'] = np.nan\n samples_table[channel + ' Geom. 
Mean'] = np.nan\n samples_table[channel + ' Median'] = np.nan\n samples_table[channel + ' Mode'] = np.nan\n samples_table[channel + ' Std'] = np.nan\n samples_table[channel + ' CV'] = np.nan\n samples_table[channel + ' Geom. Std'] = np.nan\n samples_table[channel + ' Geom. CV'] = np.nan\n samples_table[channel + ' IQR'] = np.nan\n samples_table[channel + ' RCV'] = np.nan\n for row_id, sample in zip(samples_table.index, samples):\n # If error, skip\n if isinstance(sample, ExcelUIException):\n continue\n # If units are specified, calculate stats. If not, leave empty.\n if pd.notnull(samples_table[header][row_id]):\n # Acquisition settings\n\n # Detector voltage\n # Dataframes, such as samples_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n samples_table.set_value(row_id,\n channel + ' Detector Volt.',\n sample.detector_voltage(channel))\n else:\n samples_table.at[row_id, channel + ' Detector Volt.'] = \\\n sample.detector_voltage(channel)\n\n # Amplification type\n if sample.amplification_type(channel)[0]:\n amplification_type = \"Log\"\n else:\n amplification_type = \"Linear\"\n # Dataframes, such as samples_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n samples_table.set_value(row_id,\n channel + ' Amp. Type',\n amplification_type)\n else:\n samples_table.at[row_id, channel + ' Amp. 
Type'] = \\\n amplification_type\n\n # Statistics from event list\n # Dataframes, such as samples_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n samples_table.set_value(row_id,\n channel + ' Mean',\n FlowCal.stats.mean(sample, channel))\n samples_table.set_value(row_id,\n channel + ' Median',\n FlowCal.stats.median(sample, channel))\n samples_table.set_value(row_id,\n channel + ' Mode',\n FlowCal.stats.mode(sample, channel))\n samples_table.set_value(row_id,\n channel + ' Std',\n FlowCal.stats.std(sample, channel))\n samples_table.set_value(row_id,\n channel + ' CV',\n FlowCal.stats.cv(sample, channel))\n samples_table.set_value(row_id,\n channel + ' IQR',\n FlowCal.stats.iqr(sample, channel))\n samples_table.set_value(row_id,\n channel + ' RCV',\n FlowCal.stats.rcv(sample, channel))\n else:\n samples_table.at[row_id, channel + ' Mean'] = \\\n FlowCal.stats.mean(sample, channel)\n samples_table.at[row_id, channel + ' Median'] = \\\n FlowCal.stats.median(sample, channel)\n samples_table.at[row_id, channel + ' Mode'] = \\\n FlowCal.stats.mode(sample, channel)\n samples_table.at[row_id, channel + ' Std'] = \\\n FlowCal.stats.std(sample, channel)\n samples_table.at[row_id, channel + ' CV'] = \\\n FlowCal.stats.cv(sample, channel)\n samples_table.at[row_id, channel + ' IQR'] = \\\n FlowCal.stats.iqr(sample, channel)\n samples_table.at[row_id, channel + ' RCV'] = \\\n FlowCal.stats.rcv(sample, channel)\n\n # For geometric statistics, first check for non-positive events.\n # If found, throw a warning and calculate statistics on positive\n # events only.\n if np.any(sample[:, channel] <= 0):\n # Separate positive events\n sample_positive = sample[sample[:, channel] > 0]\n # Throw warning\n msg = \"Geometric statistics for channel\" + \\\n \" {} calculated on positive events\".format(channel) + \\\n \" only ({:.1f}%). 
\".format(\n 100.*sample_positive.shape[0]/sample.shape[0])\n warnings.warn(\"On sample {}: {}\".format(row_id, msg))\n # Write warning message to table\n if samples_table.loc[row_id, 'Analysis Notes']:\n msg = samples_table.loc[row_id, 'Analysis Notes'] + msg\n # Dataframes, such as samples_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n samples_table.set_value(row_id, 'Analysis Notes', msg)\n else:\n samples_table.at[row_id, 'Analysis Notes'] = msg\n else:\n sample_positive = sample\n\n # Calculate and write geometric statistics\n # Dataframes, such as samples_table, are modified\n # differently depending on pandas' version.\n if packaging.version.parse(pd.__version__) \\\n < packaging.version.parse('0.21'):\n samples_table.set_value(\n row_id,\n channel + ' Geom. Mean',\n FlowCal.stats.gmean(sample_positive, channel))\n samples_table.set_value(\n row_id,\n channel + ' Geom. Std',\n FlowCal.stats.gstd(sample_positive, channel))\n samples_table.set_value(\n row_id,\n channel + ' Geom. CV',\n FlowCal.stats.gcv(sample_positive, channel))\n else:\n samples_table.at[row_id, channel + ' Geom. Mean'] = \\\n FlowCal.stats.gmean(sample_positive, channel)\n samples_table.at[row_id, channel + ' Geom. Std'] = \\\n FlowCal.stats.gstd(sample_positive, channel)\n samples_table.at[row_id, channel + ' Geom. 
CV'] = \\\n FlowCal.stats.gcv(sample_positive, channel)\n\n # Restore index name if table is empty\n if len(samples_table) == 0:\n samples_table.index.name = samples_table_index_name"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef generate_histograms_table(samples_table, samples, max_bins=1024):\n # Extract channels that require stats histograms\n headers = list(samples_table.columns)\n hist_headers = [h for h in headers if re_units.match(h)]\n hist_channels = [re_units.match(h).group(1) for h in hist_headers]\n\n # The number of columns in the DataFrame has to be set to the maximum\n # number of bins of any of the histograms about to be generated.\n # The following iterates through these histograms and finds the\n # largest.\n n_columns = 0\n for sample_id, sample in zip(samples_table.index, samples):\n if isinstance(sample, ExcelUIException):\n continue\n for header, channel in zip(hist_headers, hist_channels):\n if pd.notnull(samples_table[header][sample_id]):\n if n_columns < sample.resolution(channel):\n n_columns = sample.resolution(channel)\n # Saturate at max_bins\n if n_columns > max_bins:\n n_columns = max_bins\n\n # Declare multi-indexed DataFrame\n index = pd.MultiIndex.from_arrays([[],[],[]],\n names = ['Sample ID', 'Channel', ''])\n columns = ['Bin {}'.format(i + 1) for i in range(n_columns)]\n hist_table = pd.DataFrame([], index=index, columns=columns)\n\n # Generate histograms\n for sample_id, sample in zip(samples_table.index, samples):\n if isinstance(sample, ExcelUIException):\n continue\n for header, channel in zip(hist_headers, hist_channels):\n if pd.notnull(samples_table[header][sample_id]):\n # Get units in which bins are being reported\n unit = samples_table[header][sample_id]\n # Decide which scale to use\n # Channel units result in linear scale. 
Otherwise, use logicle.\n if unit == 'Channel':\n scale = 'linear'\n else:\n scale = 'logicle'\n # Define number of bins\n nbins = min(sample.resolution(channel), max_bins)\n # Calculate bin edges and centers\n # We generate twice the necessary number of bins. We then take\n # every other value as the proper bin edges, and the remaining\n # values as the bin centers.\n bins_extended = sample.hist_bins(channel, 2*nbins, scale)\n bin_edges = bins_extended[::2]\n bin_centers = bins_extended[1::2]\n # Store bin centers\n hist_table.loc[(sample_id,\n channel,\n 'Bin Centers ({})'.format(unit)),\n columns[0:len(bin_centers)]] = bin_centers\n # Calculate and store histogram counts\n hist, __ = np.histogram(sample[:,channel], bins=bin_edges)\n hist_table.loc[(sample_id, channel, 'Counts'),\n columns[0:len(bin_centers)]] = hist\n\n return hist_table", "response": "Generates a multi - indexed DataFrame containing histograms for each sample and channel in samples_table."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ngenerate a DataFrame with information about FlowCal and the current analysis.", "response": "def generate_about_table(extra_info={}):\n \"\"\"\n Make a table with information about FlowCal and the current analysis.\n\n Parameters\n ----------\n extra_info : dict, optional\n Additional keyword:value pairs to include in the table.\n\n Returns\n -------\n about_table : DataFrame\n Table with information about FlowCal and the current analysis, as\n keyword:value pairs. The following keywords are included: FlowCal\n version, and date and time of analysis. 
Keywords and values from\n `extra_info` are also included.\n\n \"\"\"\n # Make keyword and value arrays\n keywords = []\n values = []\n # FlowCal version\n keywords.append('FlowCal version')\n values.append(FlowCal.__version__)\n # Analysis date and time\n keywords.append('Date of analysis')\n values.append(time.strftime(\"%Y/%m/%d\"))\n keywords.append('Time of analysis')\n values.append(time.strftime(\"%I:%M:%S%p\"))\n # Add additional keyword:value pairs\n for k, v in six.iteritems(extra_info):\n keywords.append(k)\n values.append(v)\n\n # Make table as data frame\n about_table = pd.DataFrame(values, index=keywords)\n\n # Set column names\n about_table.columns = ['Value']\n about_table.index.name = 'Keyword'\n\n return about_table"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef show_open_file_dialog(filetypes):\n # The following line is used to Tk's main window is not shown\n Tk().withdraw()\n\n # OSX ONLY: Call bash script to prevent file select window from sticking\n # after use.\n if platform.system() == 'Darwin':\n subprocess.call(\"defaults write org.python.python \" +\n \"ApplePersistenceIgnoreState YES\", shell=True)\n filename = askopenfilename(filetypes=filetypes)\n subprocess.call(\"defaults write org.python.python \" +\n \"ApplePersistenceIgnoreState NO\", shell=True)\n else:\n filename = askopenfilename(filetypes=filetypes)\n\n return filename", "response": "Show an open file dialog and return the path of the selected file."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nruns the MS Excel User Interface. This function performs the following: 1. If `input_path` is not specified, show a dialog to choose an input Excel file. 2. Extract data from the Instruments, Beads, and Samples tables. 3. Process all the bead samples specified in the Beads table. 4. Generate statistics for each bead sample. 5. 
Process all the cell samples in the Samples table. 6. Generate statistics for each sample. 7. If requested, generate a histogram table for each fluorescent channel specified for each sample. 8. Generate a table with run time, date, FlowCal version, among others. 9. Save statistics and (if requested) histograms in an output Excel file. Parameters ---------- input_path : str Path to the Excel file to use as input. If None, show a dialog to select an input file. output_path : str Path to which to save the output Excel file. If None, use \"_output\". verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. hist_sheet : bool, optional Whether to generate a sheet in the output Excel file specifying histogram bin information.", "response": "def run(input_path=None,\n output_path=None,\n verbose=True,\n plot=True,\n hist_sheet=False):\n \"\"\"\n Run the MS Excel User Interface.\n\n This function performs the following:\n\n 1. If `input_path` is not specified, show a dialog to choose an input\n Excel file.\n 2. Extract data from the Instruments, Beads, and Samples tables.\n 3. Process all the bead samples specified in the Beads table.\n 4. Generate statistics for each bead sample.\n 5. Process all the cell samples in the Samples table.\n 6. Generate statistics for each sample.\n 7. If requested, generate a histogram table for each fluorescent\n channel specified for each sample.\n 8. Generate a table with run time, date, FlowCal version, among\n others.\n 9. Save statistics and (if requested) histograms in an output Excel\n file.\n\n Parameters\n ----------\n input_path : str\n Path to the Excel file to use as input. If None, show a dialog to\n select an input file.\n output_path : str\n Path to which to save the output Excel file. 
If None, use\n \"_output\".\n verbose : bool, optional\n Whether to print information messages during the execution of this\n function.\n plot : bool, optional\n Whether to generate and save density/histogram plots of each\n sample, and each beads sample.\n hist_sheet : bool, optional\n Whether to generate a sheet in the output Excel file specifying\n histogram bin information.\n\n \"\"\"\n\n # If input file has not been specified, show open file dialog\n if input_path is None:\n input_path = show_open_file_dialog(filetypes=[('Excel files',\n '*.xlsx')])\n if not input_path:\n if verbose:\n print(\"No input file selected.\")\n return\n # Extract directory, filename, and filename with no extension from path\n input_dir, input_filename = os.path.split(input_path)\n input_filename_no_ext, __ = os.path.splitext(input_filename)\n\n # Read relevant tables from workbook\n if verbose:\n print(\"Reading {}...\".format(input_filename))\n instruments_table = read_table(input_path,\n sheetname='Instruments',\n index_col='ID')\n beads_table = read_table(input_path,\n sheetname='Beads',\n index_col='ID')\n samples_table = read_table(input_path,\n sheetname='Samples',\n index_col='ID')\n\n # Process beads samples\n beads_samples, mef_transform_fxns, mef_outputs = process_beads_table(\n beads_table,\n instruments_table,\n base_dir=input_dir,\n verbose=verbose,\n plot=plot,\n plot_dir='plot_beads',\n full_output=True)\n\n # Add stats to beads table\n if verbose:\n print(\"\")\n print(\"Calculating statistics for beads...\")\n add_beads_stats(beads_table, beads_samples, mef_outputs)\n\n # Process samples\n samples = process_samples_table(\n samples_table,\n instruments_table,\n mef_transform_fxns=mef_transform_fxns,\n beads_table=beads_table,\n base_dir=input_dir,\n verbose=verbose,\n plot=plot,\n plot_dir='plot_samples')\n\n # Add stats to samples table\n if verbose:\n print(\"\")\n print(\"Calculating statistics for all samples...\")\n add_samples_stats(samples_table, samples)\n\n 
# Generate histograms\n if hist_sheet:\n if verbose:\n print(\"Generating histograms table...\")\n histograms_table = generate_histograms_table(samples_table, samples)\n\n # Generate about table\n about_table = generate_about_table({'Input file path': input_path})\n\n # Generate list of tables to save\n table_list = []\n table_list.append(('Instruments', instruments_table))\n table_list.append(('Beads', beads_table))\n table_list.append(('Samples', samples_table))\n if hist_sheet:\n table_list.append(('Histograms', histograms_table))\n table_list.append(('About Analysis', about_table))\n\n # Write output excel file\n if verbose:\n print(\"Saving output Excel file...\")\n if output_path is None:\n output_filename = \"{}_output.xlsx\".format(input_filename_no_ext)\n output_path = os.path.join(input_dir, output_filename)\n write_workbook(output_path, table_list)\n\n if verbose:\n print(\"\\nDone.\")"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef run_command_line(args=None):\n # Get arguments from ``sys.argv`` if necessary.\n # ``sys.argv`` has the name of the script as its first element. We remove\n # this element because it will break ``parser.parse_args()`` later. In fact,\n # ``parser.parse_args()``, if provided with no arguments, will also use\n # ``sys.argv`` after removing the first element.\n if args is None:\n args = sys.argv[1:]\n\n import argparse\n # Read command line arguments\n parser = argparse.ArgumentParser(\n description=\"process flow cytometry files with FlowCal's Excel UI.\")\n parser.add_argument(\n \"-i\",\n \"--inputpath\",\n type=str,\n nargs='?',\n help=\"input Excel file name. If not specified, show open file window\")\n parser.add_argument(\n \"-o\",\n \"--outputpath\",\n type=str,\n nargs='?',\n help=\"output Excel file name. 
If not specified, use [INPUTPATH]_output\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"print information about individual processing steps\")\n parser.add_argument(\n \"-p\",\n \"--plot\",\n action=\"store_true\",\n help=\"generate and save density plots/histograms of beads and samples\")\n parser.add_argument(\n \"-H\",\n \"--histogram-sheet\",\n action=\"store_true\",\n help=\"generate sheet in output Excel file specifying histogram bins\")\n args = parser.parse_args(args=args)\n\n # Run Excel UI\n run(input_path=args.inputpath,\n output_path=args.outputpath,\n verbose=args.verbose,\n plot=args.plot,\n hist_sheet=args.histogram_sheet)", "response": "Entry point for FlowCal and FlowCal console scripts."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef read_fcs_header_segment(buf, begin=0):\n fields = [\n 'version',\n 'text_begin',\n 'text_end',\n 'data_begin',\n 'data_end',\n 'analysis_begin',\n 'analysis_end']\n\n FCSHeader = collections.namedtuple('FCSHeader', fields)\n\n field_values = []\n\n buf.seek(begin)\n field_values.append(buf.read(10).decode(encoding).rstrip()) # version\n\n field_values.append(int(buf.read(8))) # text_begin\n field_values.append(int(buf.read(8))) # text_end\n field_values.append(int(buf.read(8))) # data_begin\n field_values.append(int(buf.read(8))) # data_end\n\n fv = buf.read(8).decode(encoding) # analysis_begin\n field_values.append(0 if fv == ' '*8 else int(fv))\n fv = buf.read(8).decode(encoding) # analysis_end\n field_values.append(0 if fv == ' '*8 else int(fv))\n\n header = FCSHeader._make(field_values)\n return header", "response": "Reads the HEADER segment of the FCS file and returns it as a sequence of namedtuple objects."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreading a TEXT segment of FCS file into a tree of lists.", "response": "def read_fcs_text_segment(buf, begin, end, delim=None, 
supplemental=False):\n \"\"\"\n Read TEXT segment of FCS file.\n\n Parameters\n ----------\n buf : file-like object\n Buffer containing data to interpret as TEXT segment.\n begin : int\n Offset (in bytes) to first byte of TEXT segment in `buf`.\n end : int\n Offset (in bytes) to last byte of TEXT segment in `buf`.\n delim : str, optional\n 1-byte delimiter character which delimits key-value entries of\n TEXT segment. If None and ``supplemental==False``, will extract\n delimiter as first byte of TEXT segment.\n supplemental : bool, optional\n Flag specifying that segment is a supplemental TEXT segment (see\n FCS3.0 and FCS3.1), in which case a delimiter (``delim``) must be\n specified.\n\n Returns\n -------\n text : dict\n Dictionary of key-value entries extracted from TEXT segment.\n delim : str or None\n String containing delimiter or None if TEXT segment is empty.\n\n Raises\n ------\n ValueError\n If supplemental TEXT segment (``supplemental==True``) but ``delim`` is\n not specified.\n ValueError\n If primary TEXT segment (``supplemental==False``) does not start with\n delimiter.\n ValueError\n If first keyword starts with delimiter (e.g. a primary TEXT segment\n with the following contents: ///k1/v1/k2/v2/).\n ValueError\n If odd number of keys + values detected (indicating an unpaired key or\n value).\n ValueError\n If TEXT segment is ill-formed (unable to be parsed according to the\n FCS standards).\n\n Notes\n -----\n ANALYSIS segments and supplemental TEXT segments are parsed the same way,\n so this function can also be used to parse ANALYSIS segments.\n\n This function does *not* automatically parse and accumulate additional\n TEXT-like segments (e.g. supplemental TEXT segments or ANALYSIS segments)\n referenced in the originally specified TEXT segment.\n\n References\n ----------\n .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman,\n \"Data file standard for flow cytometry. 
Data File Standards\n Committee of the Society for Analytical Cytology,\" Cytometry vol\n 11, pp 323-332, 1990, PMID 2340769.\n\n .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman,\n J.C. Wood, R.F. Murphy, \"Proposed new data file standard for flow\n cytometry, version FCS 3.0,\" Cytometry vol 28, pp 118-122, 1997,\n PMID 9181300.\n\n .. [3] J. Spidlen, et al, \"Data File Standard for Flow Cytometry,\n version FCS 3.1,\" Cytometry A vol 77A, pp 97-100, 2009, PMID\n 19937951.\n\n \"\"\"\n if delim is None:\n if supplemental:\n raise ValueError(\"must specify ``delim`` if reading supplemental\"\n + \" TEXT segment\")\n else:\n buf.seek(begin)\n delim = buf.read(1).decode(encoding)\n\n # The offsets are inclusive (meaning they specify first and last byte\n # WITHIN segment) and seeking is inclusive (read() after seek() reads the\n # byte which was seeked to). This means the length of the segment is\n # ((end+1) - begin).\n buf.seek(begin)\n raw = buf.read((end+1)-begin).decode(encoding)\n\n # If segment is empty, return empty dictionary as text\n if not raw:\n return {}, None\n\n if not supplemental:\n # Check that the first character of the TEXT segment is equal to the\n # delimiter.\n if raw[0] != delim:\n raise ValueError(\"primary TEXT segment should start with\"\n + \" delimiter\")\n\n # The FCS standards indicate that keyword values must be flanked by the\n # delimiter character, but they do not require that the TEXT segment end\n # with the delimiter. As such, look for the last instance of the delimiter\n # in the segment and retain everything before it (potentially removing\n # TEXT segment characters which occur after the last instance of the\n # delimiter).\n end_index = raw.rfind(delim)\n if supplemental and end_index == -1:\n # Delimiter was not found. 
This should only be permitted for an empty\n # supplemental TEXT segment (primary TEXT segment should fail above\n # if first character doesn't match delimiter).\n return {}, delim\n else:\n raw = raw[:end_index]\n\n pairs_list = raw.split(delim)\n\n ###\n # Reconstruct Keys and Values By Aggregating Escaped Delimiters\n ###\n # According to the FCS standards, delimiter characters are permitted in\n # keywords and keyword values as long as they are escaped by being\n # immediately repeated. Delimiter characters are not permitted as the\n # first character of a keyword or keyword value, however. Null (zero\n # length) keywords or keyword values are also not permitted. According to\n # these restrictions, a delimiter character should manifest itself as an\n # empty element in ``pairs_list``. As such, scan through ``pairs_list``\n # looking for empty elements and reconstruct keywords or keyword values\n # containing delimiters.\n ###\n\n # Start scanning from the end of the list since the end of the list is\n # well defined (i.e. doesn't depend on whether the TEXT segment is a\n # primary segment, which MUST start with the delimiter, or a supplemental\n # segment, which is not required to start with the delimiter).\n reconstructed_KV_accumulator = []\n idx = len(pairs_list) - 1\n while idx >= 0:\n if pairs_list[idx] == '':\n # Count the number of consecutive empty elements to determine how\n # many escaped delimiters exist and whether or not a true boundary\n # delimiter exists.\n num_empty_elements = 1\n idx = idx - 1\n while idx >=0 and pairs_list[idx] == '':\n num_empty_elements = num_empty_elements + 1\n idx = idx - 1\n # Need to differentiate between rolling off the bottom of the list\n # and hitting a non-empty element. 
Assessing pairs_list[-1] can\n # still be evaluated (by wrapping around the list, which is not\n # what we want to check) so check if we've finished scanning the\n # list first.\n if idx < 0:\n # We rolled off the bottom of the list.\n if num_empty_elements == 1:\n # If we only hit 1 empty element before rolling off the\n # list, then there were no escaped delimiters and the\n # segment started with a delimiter.\n break\n elif (num_empty_elements % 2) == 0:\n # Even number of empty elements.\n #\n # If this is a supplemental TEXT segment, this can be\n # interpreted as *not* starting the TEXT segment with the\n # delimiter (which is permitted for supplemental TEXT\n # segments) and starting the first keyword with one or\n # more delimiters, which is prohibited.\n if supplemental:\n raise ValueError(\"starting a TEXT segment keyword\"\n + \" with a delimiter is prohibited\")\n # If this is a primary TEXT segment, this is an ill-formed\n # segment. Rationale: 1 empty element will always be\n # consumed as the initial delimiter which a primary TEXT\n # segment is required to start with. After that delimiter\n # is accounted for, you now have either an unescaped\n # delimiter, which is prohibited, or a boundary delimiter,\n # which would imply that the entire first keyword was\n # composed of delimiters, which is prohibited because\n # keywords are not allowed to start with the delimiter).\n raise ValueError(\"ill-formed TEXT segment\")\n else:\n # Odd number of empty elements. This can be interpreted as\n # starting the segment with a delimiter and then having\n # one or more delimiters starting the first keyword, which\n # is prohibited.\n raise ValueError(\"starting a TEXT segment keyword with a\"\n + \" delimiter is prohibited\")\n else:\n # We encountered a non-empty element. 
Calculate the number of\n # escaped delimiters and whether or not a true boundary\n # delimiter is present.\n num_delim = (num_empty_elements+1)//2\n boundary_delim = (num_empty_elements % 2) == 0\n\n if boundary_delim:\n # A boundary delimiter exists. We know that the boundary\n # has to be on the right side (end of the sequence),\n # because keywords and keyword values are prohibited from\n # starting with a delimiter, which would be the case if the\n # boundary delimiter was anywhere BUT the last character.\n # This means we need to postpend the appropriate number of\n # delim characters to the end of the non-empty list\n # element that ``idx`` currently points to.\n pairs_list[idx] = pairs_list[idx] + (num_delim*delim)\n\n # We can now add the reconstructed keyword or keyword\n # value to the accumulator and move on. It's possible that\n # this keyword or keyword value is incompletely\n # reconstructed (e.g. /key//1///value1/\n # => {'key/1/':'value1'}; there are other delimiters in\n # this keyword or keyword value), but that case is now\n # handled independently of this case and just like any\n # other instance of escaped delimiters *without* a\n # boundary delimiter.\n reconstructed_KV_accumulator.append(pairs_list[idx])\n idx = idx - 1\n else:\n # No boundary delimiters exist, so we need to glue the\n # list elements before and after this sequence of\n # consecutive delimiters together with the appropriate\n # number of delimiters.\n if len(reconstructed_KV_accumulator) == 0:\n # Edge Case: The accumulator should always have at\n # least 1 element in it at this point. If it doesn't,\n # the last value ends with the delimiter, which will\n # result in two consecutive empty elements, which\n # won't fall into this case. Only 1 empty element\n # indicates an ill-formed TEXT segment with an\n # unpaired non-boundary delimiter (e.g. /k1/v1//),\n # which is not permitted. 
The ill-formed TEXT segment\n # implied by 1 empty element is recoverable, though,\n # and a use case which is known to exist, so throw a\n # warning and ignore the 2nd copy of the delimiter.\n warnings.warn(\"detected ill-formed TEXT segment (ends\"\n + \" with two delimiter characters).\"\n + \" Ignoring last delimiter character\")\n reconstructed_KV_accumulator.append(pairs_list[idx])\n else:\n reconstructed_KV_accumulator[-1] = pairs_list[idx] + \\\n (num_delim*delim) + reconstructed_KV_accumulator[-1]\n idx = idx - 1\n else:\n # Non-empty element, just append\n reconstructed_KV_accumulator.append(pairs_list[idx])\n idx = idx - 1\n\n pairs_list_reconstructed = list(reversed(reconstructed_KV_accumulator))\n\n # List length should be even since all key-value entries should be pairs\n if len(pairs_list_reconstructed) % 2 != 0:\n raise ValueError(\"odd # of (keys + values); unpaired key or value\")\n\n text = dict(zip(pairs_list_reconstructed[0::2],\n pairs_list_reconstructed[1::2]))\n\n return text, delim"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreads the DATA segment of the FCS file.", "response": "def read_fcs_data_segment(buf,\n begin,\n end,\n datatype,\n num_events,\n param_bit_widths,\n big_endian,\n param_ranges=None):\n \"\"\"\n Read DATA segment of FCS file.\n\n Parameters\n ----------\n buf : file-like object\n Buffer containing data to interpret as DATA segment.\n begin : int\n Offset (in bytes) to first byte of DATA segment in `buf`.\n end : int\n Offset (in bytes) to last byte of DATA segment in `buf`.\n datatype : {'I', 'F', 'D', 'A'}\n String specifying FCS file datatype (see $DATATYPE keyword from\n FCS standards). Supported datatypes include 'I' (unsigned\n binary integer), 'F' (single precision floating point), and 'D'\n (double precision floating point). 
'A' (ASCII) is recognized\n but not supported.\n num_events : int\n Total number of events (see $TOT keyword from FCS standards).\n param_bit_widths : array-like\n Array specifying parameter (aka channel) bit width for each\n parameter (see $PnB keywords from FCS standards). The length of\n `param_bit_widths` should match the $PAR keyword value from the\n FCS standards (which indicates the total number of parameters).\n If `datatype` is 'I', data must be byte aligned (i.e. all\n parameter bit widths should be divisible by 8), and data are\n upcast to the nearest uint8, uint16, uint32, or uint64 data\n type. Bit widths larger than 64 bits are not supported.\n big_endian : bool\n Endianness of computer used to acquire data (see $BYTEORD\n keyword from FCS standards). True implies big endian; False\n implies little endian.\n param_ranges : array-like, optional\n Array specifying parameter (aka channel) range for each\n parameter (see $PnR keywords from FCS standards). Used to\n ensure erroneous values are not read from DATA segment by\n applying a bit mask to remove unused bits. The length of\n `param_ranges` should match the $PAR keyword value from the FCS\n standards (which indicates the total number of parameters). If\n None, no masking is performed.\n\n Returns\n -------\n data : numpy array\n NxD numpy array describing N cytometry events observing D data\n dimensions.\n\n Raises\n ------\n ValueError\n If lengths of `param_bit_widths` and `param_ranges` don't match.\n ValueError\n If calculated DATA segment size (as determined from the number\n of events, the number of parameters, and the number of bytes per\n data point) does not match size specified by `begin` and `end`.\n ValueError\n If `param_bit_widths` doesn't agree with `datatype` for single\n precision or double precision floating point (i.e. 
they should\n all be 32 or 64, respectively).\n ValueError\n If `datatype` is unrecognized.\n NotImplementedError\n If `datatype` is 'A'.\n NotImplementedError\n If `datatype` is 'I' but data is not byte aligned.\n\n References\n ----------\n .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman,\n \"Data file standard for flow cytometry. Data File Standards\n Committee of the Society for Analytical Cytology,\" Cytometry vol\n 11, pp 323-332, 1990, PMID 2340769.\n\n .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman,\n J.C. Wood, R.F. Murphy, \"Proposed new data file standard for flow\n cytometry, version FCS 3.0,\" Cytometry vol 28, pp 118-122, 1997,\n PMID 9181300.\n\n .. [3] J. Spidlen, et al, \"Data File Standard for Flow Cytometry,\n version FCS 3.1,\" Cytometry A vol 77A, pp 97-100, 2009, PMID\n 19937951.\n\n \"\"\"\n num_params = len(param_bit_widths)\n if (param_ranges is not None and len(param_ranges) != num_params):\n raise ValueError(\"param_bit_widths and param_ranges must have same\"\n + \" length\")\n\n shape = (int(num_events), num_params)\n\n if datatype == 'I':\n # Check if all parameters fit into preexisting data type\n if (all(bw == 8 for bw in param_bit_widths) or\n all(bw == 16 for bw in param_bit_widths) or\n all(bw == 32 for bw in param_bit_widths) or\n all(bw == 64 for bw in param_bit_widths)):\n\n num_bits = param_bit_widths[0]\n\n # Sanity check that the total # of bytes that we're about to\n # interpret is exactly the # of bytes in the DATA segment.\n # In some FCS files, the offset to the last byte (end) actually\n # points to the first byte of the next segment, in which case the #\n # of bytes specified in the header exceeds the # of bytes that we\n # should read by one.\n if (shape[0]*shape[1]*(num_bits//8)) != ((end+1)-begin) and \\\n (shape[0]*shape[1]*(num_bits//8)) != (end-begin):\n raise ValueError(\"DATA size does not match expected array\"\n + \" size (array size =\"\n + \" {0} 
bytes,\".format(shape[0]*shape[1]*(num_bits//8))\n + \" DATA segment size = {0} bytes)\".format((end+1)-begin))\n\n dtype = np.dtype('{0}u{1}'.format('>' if big_endian else '<',\n num_bits//8))\n data = np.memmap(\n buf,\n dtype=dtype,\n mode='r',\n offset=begin,\n shape=shape,\n order='C')\n\n # Cast memmap object to regular numpy array stored in memory (as\n # opposed to being backed by disk)\n data = np.array(data)\n else:\n # The FCS standards technically allows for parameters to NOT be\n # byte aligned, but parsing a DATA segment which is not byte\n # aligned requires significantly more computation (and probably an\n # external library which exposes bit level resolution to a block\n # of memory). I don't think this is a common use case, so I'm just\n # going to detect it and raise an error.\n if (not all(bw % 8 == 0 for bw in param_bit_widths) or\n any(bw > 64 for bw in param_bit_widths)):\n raise NotImplementedError(\"only byte aligned parameter bit\"\n + \" widths (bw % 8 = 0) <= 64 are supported\"\n + \" (param_bit_widths={0})\".format(param_bit_widths))\n\n # Read data in as a byte array\n byte_shape = (int(num_events),\n np.sum(np.array(param_bit_widths)//8))\n\n # Sanity check that the total # of bytes that we're about to\n # interpret is exactly the # of bytes in the DATA segment.\n # In some FCS files, the offset to the last byte (end) actually\n # points to the first byte of the next segment, in which case the #\n # of bytes specified in the header exceeds the # of bytes that we\n # should read by one.\n if (byte_shape[0]*byte_shape[1]) != ((end+1)-begin) and \\\n (byte_shape[0]*byte_shape[1]) != (end-begin):\n raise ValueError(\"DATA size does not match expected array\"\n + \" size (array size =\"\n + \" {0} bytes,\".format(byte_shape[0]*byte_shape[1])\n + \" DATA segment size = {0} bytes)\".format((end+1)-begin))\n\n byte_data = np.memmap(\n buf,\n dtype='uint8', # endianness doesn't matter for 1 byte\n mode='r',\n offset=begin,\n 
shape=byte_shape,\n order='C')\n\n # Upcast all data to fit nearest supported data type of largest\n # bit width\n upcast_bw = int(2**np.max(np.ceil(np.log2(param_bit_widths))))\n\n # Create new array of upcast data type and use byte data to\n # populate it. The new array will have endianness native to user's\n # machine; does not preserve endianness of stored FCS data.\n upcast_dtype = 'u{0}'.format(upcast_bw//8)\n data = np.zeros(shape,dtype=upcast_dtype)\n\n # Array mapping each column of data to first corresponding column\n # in byte_data\n byte_boundaries = np.roll(np.cumsum(param_bit_widths)//8,1)\n byte_boundaries[0] = 0\n\n # Reconstitute columns of data by bit shifting appropriate columns\n # in byte_data and accumulating them\n for col in range(data.shape[1]):\n num_bytes = param_bit_widths[col]//8\n for b in range(num_bytes):\n byte_data_col = byte_boundaries[col] + b\n byteshift = (num_bytes-b-1) if big_endian else b\n\n if byteshift > 0:\n # byte_data must be upcast or else bit shift fails\n data[:,col] += \\\n byte_data[:,byte_data_col].astype(upcast_dtype) \\\n << (byteshift*8)\n else:\n data[:,col] += byte_data[:,byte_data_col]\n\n if param_ranges is not None:\n # To strictly follow the FCS standards, mask off the unused high bits\n # as specified by param_ranges.\n for col in range(data.shape[1]):\n # bits_used should be related to resolution of cytometer ADC\n bits_used = int(np.ceil(np.log2(param_ranges[col])))\n\n # Create a bit mask to mask off all but the lowest bits_used bits.\n # bitmask is a native python int type which does not have an\n # underlying size. 
The int type is effectively left-padded with\n # 0s (infinitely), and the '&' operation preserves the dataype of\n # the array, so this shouldn't be an issue.\n bitmask = ~((~0) << bits_used)\n data[:,col] &= bitmask\n\n elif datatype in ('F','D'):\n num_bits = 32 if datatype == 'F' else 64\n\n # Confirm that bit widths are consistent with data type\n if not all(bw == num_bits for bw in param_bit_widths):\n raise ValueError(\"all param_bit_widths should be\"\n + \" {0} if datatype =\".format(num_bits)\n + \" \\'{0}\\' (param_bit_widths=\".format(datatype)\n + \"{0})\".format(param_bit_widths))\n\n # Sanity check that the total # of bytes that we're about to interpret\n # is exactly the # of bytes in the DATA segment.\n # In some FCS files, the offset to the last byte (end) actually points\n # to the first byte of the next segment, in which case the # of bytes\n # specified in the header exceeds the # of bytes that we should read by\n # one.\n if (shape[0]*shape[1]*(num_bits//8)) != ((end+1)-begin) and \\\n (shape[0]*shape[1]*(num_bits//8)) != (end-begin):\n raise ValueError(\"DATA size does not match expected array size\"\n + \" (array size = {0}\".format(shape[0]*shape[1]*(num_bits//8))\n + \" bytes, DATA segment size =\"\n + \" {0} bytes)\".format((end+1)-begin))\n\n dtype = np.dtype('{0}f{1}'.format('>' if big_endian else '<',\n num_bits//8))\n data = np.memmap(\n buf,\n dtype=dtype,\n mode='r',\n offset=begin,\n shape=shape,\n order='C')\n\n # Cast memmap object to regular numpy array stored in memory (as\n # opposed to being backed by disk)\n data = np.array(data)\n elif datatype == 'A':\n raise NotImplementedError(\"only \\'I\\' (unsigned binary integer),\"\n + \" \\'F\\' (single precision floating point), and \\'D\\' (double\"\n + \" precision floating point) data types are supported (detected\"\n + \" datatype=\\'{0}\\')\".format(datatype))\n else:\n raise ValueError(\"unrecognized datatype (detected datatype=\"\n + \"\\'{0}\\')\".format(datatype))\n\n 
return data"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef acquisition_time(self):\n # Get time channels indices\n time_channel_idx = [idx\n for idx, channel in enumerate(self.channels)\n if channel.lower() == 'time']\n if len(time_channel_idx) > 1:\n raise KeyError(\"more than one time channel in data\")\n # Check if the time channel is available\n elif len(time_channel_idx) == 1:\n # Use the event list\n time_channel = self.channels[time_channel_idx[0]]\n return (self[-1, time_channel] - self[0, time_channel]) \\\n * self.time_step\n elif (self._acquisition_start_time is not None and\n self._acquisition_end_time is not None):\n # Use start_time and end_time:\n dt = (self._acquisition_end_time - self._acquisition_start_time)\n return dt.total_seconds()\n else:\n return None", "response": "Returns the acquisition time in seconds."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the amplification type for the specified channels.", "response": "def amplification_type(self, channels=None):\n \"\"\"\n Get the amplification type used for the specified channel(s).\n\n Each channel uses one of two amplification types: linear or\n logarithmic. This function returns, for each channel, a tuple of\n two numbers, in which the first number indicates the number of\n decades covered by the logarithmic amplifier, and the second\n indicates the linear value corresponding to the channel value zero.\n If the first value is zero, the amplifier used is linear\n\n The amplification type for channel \"n\" is extracted from the\n required $PnE parameter.\n\n Parameters\n ----------\n channels : int, str, list of int, list of str\n Channel(s) for which to get the amplification type. 
If None,\n return a list with the amplification type of all channels, in\n the order of ``FCSData.channels``.\n\n Return\n ------\n tuple, or list of tuples\n The amplification type of the specified channel(s). This is\n reported as a tuple, in which the first element indicates how\n many decades the logarithmic amplifier covers, and the second\n indicates the linear value that corresponds to a channel value\n of zero. If the first element is zero, the amplification type\n is linear.\n\n \"\"\"\n # Check default\n if channels is None:\n channels = self._channels\n\n # Get numerical indices of channels\n channels = self._name_to_index(channels)\n\n # Get detector type of the specified channels\n if hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types):\n return [self._amplification_type[ch] for ch in channels]\n else:\n return self._amplification_type[channels]"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting the detector voltage for the specified channels.", "response": "def detector_voltage(self, channels=None):\n \"\"\"\n Get the detector voltage used for the specified channel(s).\n\n The detector voltage for channel \"n\" is extracted from the $PnV\n parameter, if available.\n\n Parameters\n ----------\n channels : int, str, list of int, list of str\n Channel(s) for which to get the detector voltage. If None,\n return a list with the detector voltage of all channels, in the\n order of ``FCSData.channels``.\n\n Return\n ------\n float or list of float\n The detector voltage of the specified channel(s). 
If no\n information about the detector voltage is found for a channel,\n return None.\n\n \"\"\"\n # Check default\n if channels is None:\n channels = self._channels\n\n # Get numerical indices of channels\n channels = self._name_to_index(channels)\n\n # Get detector type of the specified channels\n if hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types):\n return [self._detector_voltage[ch] for ch in channels]\n else:\n return self._detector_voltage[channels]"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef amplifier_gain(self, channels=None):\n # Check default\n if channels is None:\n channels = self._channels\n\n # Get numerical indices of channels\n channels = self._name_to_index(channels)\n\n # Get detector type of the specified channels\n if hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types):\n return [self._amplifier_gain[ch] for ch in channels]\n else:\n return self._amplifier_gain[channels]", "response": "Get the amplifier gain used for the specified channels."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef range(self, channels=None):\n # Check default\n if channels is None:\n channels = self._channels\n\n # Get numerical indices of channels\n channels = self._name_to_index(channels)\n\n # Get the range of the specified channels\n if hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types):\n return [self._range[ch] for ch in channels]\n else:\n return self._range[channels]", "response": "Returns the range of the event in the specified channels."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef resolution(self, channels=None):\n # Check default\n if channels is None:\n channels = self._channels\n\n # Get numerical indices of channels\n channels = self._name_to_index(channels)\n\n # Get resolution of the specified channels\n if 
hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types):\n return [self._resolution[ch] for ch in channels]\n else:\n return self._resolution[channels]", "response": "Get the resolution of the specified channels."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef hist_bins(self, channels=None, nbins=None, scale='logicle', **kwargs):\n # Default: all channels\n if channels is None:\n channels = list(self._channels)\n\n # Get numerical indices of channels\n channels = self._name_to_index(channels)\n\n # Convert to list if necessary\n channel_list = channels\n if not isinstance(channel_list, list):\n channel_list = [channel_list]\n if not isinstance(nbins, list):\n nbins = [nbins]*len(channel_list)\n if not isinstance(scale, list):\n scale = [scale]*len(channel_list)\n\n # Iterate\n bins = []\n for channel, nbins_channel, scale_channel in \\\n zip(channel_list, nbins, scale):\n # Get channel resolution\n res_channel = self.resolution(channel)\n # Get default nbins\n if nbins_channel is None:\n nbins_channel = res_channel\n # Get range of channel\n range_channel = self.range(channel)\n # Generate bins according to specified scale\n if scale_channel == 'linear':\n # We will now generate ``nbins`` uniformly spaced bins centered\n # at ``linspace(range_channel[0], range_channel[1], nbins)``. To\n # do so, we need to generate ``nbins + 1`` uniformly spaced\n # points.\n delta_res = (range_channel[1] - range_channel[0]) / \\\n (res_channel - 1)\n bins_channel = np.linspace(range_channel[0] - delta_res/2,\n range_channel[1] + delta_res/2,\n nbins_channel + 1)\n\n elif scale_channel == 'log':\n # Check if the lower limit is equal or less than zero. 
If so,\n # change the lower limit to one or some lower value, such that\n # the range covers at least five decades.\n if range_channel[0] <= 0:\n range_channel[0] = min(1., range_channel[1]/1e5)\n # Log range\n range_channel = [np.log10(range_channel[0]),\n np.log10(range_channel[1])]\n # We will now generate ``nbins`` uniformly spaced bins centered\n # at ``linspace(range_channel[0], range_channel[1], nbins)``. To\n # do so, we need to generate ``nbins + 1`` uniformly spaced\n # points.\n delta_res = (range_channel[1] - range_channel[0]) / \\\n (res_channel - 1)\n bins_channel = np.linspace(range_channel[0] - delta_res/2,\n range_channel[1] + delta_res/2,\n nbins_channel + 1)\n # Exponentiate bins\n bins_channel = 10**(bins_channel)\n\n elif scale_channel == 'logicle':\n # Create transform class\n # Use the LogicleTransform class from the plot module\n t = FlowCal.plot._LogicleTransform(data=self,\n channel=channel,\n **kwargs)\n # We now generate ``nbins`` uniformly spaced bins centered at\n # ``linspace(0, M, nbins)``. 
To do so, we need to generate\n # ``nbins + 1`` uniformly spaced points.\n delta_res = float(t.M) / (res_channel - 1)\n s = np.linspace(- delta_res/2.,\n t.M + delta_res/2.,\n nbins_channel + 1)\n # Finally, apply the logicle transformation to generate bins\n bins_channel = t.transform_non_affine(s)\n\n else:\n # Scale not supported\n raise ValueError('scale \"{}\" not supported'.format(\n scale_channel))\n # Accumulate\n bins.append(bins_channel)\n\n # Extract from list if channels was not a list\n if not isinstance(channels, list):\n bins = bins[0]\n\n return bins", "response": "Returns a list of histogram bin edges for the specified channel."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _parse_time_string(time_str):\n # If input is None, return None\n if time_str is None:\n return None\n\n time_l = time_str.split(':')\n if len(time_l) == 3:\n # Either 'hh:mm:ss' or 'hh:mm:ss.cc'\n if '.' in time_l[2]:\n # 'hh:mm:ss.cc' format\n time_str = time_str.replace('.', ':')\n else:\n # 'hh:mm:ss' format\n time_str = time_str + ':0'\n # Attempt to parse string, return None if not possible\n try:\n t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time()\n except:\n t = None\n elif len(time_l) == 4:\n # 'hh:mm:ss:tt' format\n time_l[3] = '{:06d}'.format(int(float(time_l[3])*1e6/60))\n time_str = ':'.join(time_l)\n # Attempt to parse string, return None if not possible\n try:\n t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time()\n except:\n t = None\n else:\n # Unknown format\n t = None\n\n return t", "response": "Parse a string representation of a time into a datetime. time object."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nparse a string representation of a date into a datetime. 
date object.", "response": "def _parse_date_string(date_str):\n \"\"\"\n Get a datetime.date object from a string date representation.\n\n The FCS standard includes an optional keyword parameter $DATE in\n which the acquistion date is stored. In FCS 2.0, the date is saved\n as 'dd-mmm-yy', whereas in FCS 3.0 and 3.1 the date is saved as\n 'dd-mmm-yyyy'.\n\n This function attempts to parse these formats, along with a couple\n of nonstandard ones, using the datetime module.\n\n Parameters:\n -----------\n date_str : str, or None\n String representation of date, or None.\n\n Returns:\n --------\n t : datetime.datetime, or None\n Date parsed from `date_str`. If parsing was not possible,\n return None. If `date_str` is None, return None\n\n \"\"\"\n # If input is None, return None\n if date_str is None:\n return None\n\n # Standard format for FCS2.0\n try:\n return datetime.datetime.strptime(date_str, '%d-%b-%y')\n except ValueError:\n pass\n # Standard format for FCS3.0\n try:\n return datetime.datetime.strptime(date_str, '%d-%b-%Y')\n except ValueError:\n pass\n # Nonstandard format 1\n try:\n return datetime.datetime.strptime(date_str, '%y-%b-%d')\n except ValueError:\n pass\n # Nonstandard format 2\n try:\n return datetime.datetime.strptime(date_str, '%Y-%b-%d')\n except ValueError:\n pass\n\n # If none of these formats work, return None\n return None"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _name_to_index(self, channels):\n # Check if list, then run recursively\n if hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types):\n return [self._name_to_index(ch) for ch in channels]\n\n if isinstance(channels, six.string_types):\n # channels is a string containing a channel name\n if channels in self.channels:\n return self.channels.index(channels)\n else:\n raise ValueError(\"{} is not a valid channel name.\"\n .format(channels))\n\n if isinstance(channels, int):\n if (channels < 
len(self.channels)\n and channels >= -len(self.channels)):\n return channels\n else:\n raise ValueError(\"index out of range\")\n\n else:\n raise TypeError(\"input argument should be an integer, string or \"\n \"list of integers or strings\")", "response": "Returns the index of the specified channel names."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nscrape version information from specified file path.", "response": "def find_version(file_path):\n \"\"\"\n Scrape version information from specified file path.\n\n \"\"\"\n with open(file_path, 'r') as f:\n file_contents = f.read()\n version_match = re.search(r\"^__version__\\s*=\\s*['\\\"]([^'\\\"]*)['\\\"]\",\n file_contents, re.M)\n if version_match:\n return version_match.group(1)\n else:\n raise RuntimeError(\"unable to find version string\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef start_end(data, num_start=250, num_end=100, full_output=False):\n\n if num_start < 0:\n num_start = 0\n if num_end < 0:\n num_end = 0\n\n if data.shape[0] < (num_start + num_end):\n raise ValueError('Number of events to discard greater than total' + \n ' number of events.')\n \n mask = np.ones(shape=data.shape[0],dtype=bool)\n mask[:num_start] = False\n if num_end > 0:\n # catch the edge case where `num_end=0` causes mask[-num_end:] to mask\n # off all events\n mask[-num_end:] = False\n gated_data = data[mask]\n\n if full_output:\n StartEndGateOutput = collections.namedtuple(\n 'StartEndGateOutput',\n ['gated_data', 'mask'])\n return StartEndGateOutput(gated_data=gated_data, mask=mask)\n else:\n return gated_data", "response": "Gate out first and last events from beginning and end of a flow cytometry data."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef high_low(data, channels=None, high=None, low=None, full_output=False):\n # Extract channels in 
which to gate\n if channels is None:\n data_ch = data\n else:\n data_ch = data[:,channels]\n if data_ch.ndim == 1:\n data_ch = data_ch.reshape((-1,1))\n\n # Default values for high and low\n if high is None:\n if hasattr(data_ch, 'range'):\n high = [np.Inf if di is None else di[1] for di in data_ch.range()]\n high = np.array(high)\n else:\n high = np.Inf\n if low is None:\n if hasattr(data_ch, 'range'):\n low = [-np.Inf if di is None else di[0] for di in data_ch.range()]\n low = np.array(low)\n else:\n low = -np.Inf\n\n # Gate\n mask = np.all((data_ch < high) & (data_ch > low), axis = 1)\n gated_data = data[mask]\n\n if full_output:\n HighLowGateOutput = collections.namedtuple(\n 'HighLowGateOutput',\n ['gated_data', 'mask'])\n return HighLowGateOutput(gated_data=gated_data, mask=mask)\n else:\n return gated_data", "response": "Gate out high and low values across all specified channels."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef ellipse(data, channels,\n center, a, b, theta=0,\n log=False, full_output=False):\n \"\"\"\n Gate that preserves events inside an ellipse-shaped region.\n\n Events are kept if they satisfy the following relationship::\n\n (x/a)**2 + (y/b)**2 <= 1\n\n where `x` and `y` are the coordinates of the event list, after\n substracting `center` and rotating by -`theta`. This is mathematically\n equivalent to maintaining the events inside an ellipse with major\n axis `a`, minor axis `b`, center at `center`, and tilted by `theta`.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : list of int, list of str\n Two channels on which to perform gating.\n center, a, b, theta (optional) : float\n Ellipse parameters. 
`a` is the major axis, `b` is the minor axis.\n log : bool, optional\n Flag specifying that log10 transformation should be applied to\n `data` before gating.\n full_output : bool, optional\n Flag specifying to return additional outputs. If true, the outputs\n are given as a namedtuple.\n\n Returns\n -------\n gated_data : FCSData or numpy array\n Gated flow cytometry data of the same format as `data`.\n mask : numpy array of bool, only if ``full_output==True``\n Boolean gate mask used to gate data such that ``gated_data =\n data[mask]``.\n contour : list of 2D numpy arrays, only if ``full_output==True``\n List of 2D numpy array(s) of x-y coordinates tracing out\n the edge of the gated region.\n\n Raises\n ------\n ValueError\n If more or less than 2 channels are specified.\n\n \"\"\"\n # Extract channels in which to gate\n if len(channels) != 2:\n raise ValueError('2 channels should be specified.')\n data_ch = data[:,channels].view(np.ndarray)\n\n # Log if necessary\n if log:\n data_ch = np.log10(data_ch)\n\n # Center\n center = np.array(center)\n data_centered = data_ch - center\n\n # Rotate\n R = np.array([[np.cos(theta), np.sin(theta)],\n [-np.sin(theta), np.cos(theta)]])\n data_rotated = np.dot(data_centered, R.T)\n\n # Generate mask\n mask = ((data_rotated[:,0]/a)**2 + (data_rotated[:,1]/b)**2 <= 1)\n\n # Gate\n data_gated = data[mask]\n\n if full_output:\n # Calculate contour\n t = np.linspace(0,1,100)*2*np.pi\n ci = np.array([a*np.cos(t), b*np.sin(t)]).T\n ci = np.dot(ci, R) + center\n if log:\n ci = 10**ci\n cntr = [ci]\n\n # Build output namedtuple\n EllipseGateOutput = collections.namedtuple(\n 'EllipseGateOutput',\n ['gated_data', 'mask', 'contour'])\n return EllipseGateOutput(\n gated_data=data_gated, mask=mask, contour=cntr)\n else:\n return data_gated", "response": "Returns a new NxD - style grid of events in an ellipse - shaped region."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef density2d(data,\n 
channels=[0,1],\n bins=1024,\n gate_fraction=0.65,\n xscale='logicle',\n yscale='logicle',\n sigma=10.0,\n full_output=False):\n \"\"\"\n Gate that preserves events in the region with highest density.\n\n Gate out all events in `data` but those near regions of highest\n density for the two specified channels.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : list of int, list of str, optional\n Two channels on which to perform gating.\n bins : int or array_like or [int, int] or [array, array], optional\n Bins used for gating:\n\n - If None, use ``data.hist_bins`` to obtain bin edges for both\n axes. None is not allowed if ``data.hist_bins`` is not\n available.\n - If int, `bins` specifies the number of bins to use for both\n axes. If ``data.hist_bins`` exists, it will be used to generate\n a number `bins` of bins.\n - If array_like, `bins` directly specifies the bin edges to use\n for both axes.\n - If [int, int], each element of `bins` specifies the number of\n bins for each axis. If ``data.hist_bins`` exists, use it to\n generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.\n - If [array, array], each element of `bins` directly specifies\n the bin edges to use for each axis.\n - Any combination of the above, such as [int, array], [None,\n int], or [array, int]. In this case, None indicates to generate\n bin edges using ``data.hist_bins`` as above, int indicates the\n number of bins to generate, and an array directly indicates the\n bin edges. Note that None is not allowed if ``data.hist_bins``\n does not exist.\n gate_fraction : float, optional\n Fraction of events to retain after gating. Should be between 0 and\n 1, inclusive.\n xscale : str, optional\n Scale of the bins generated for the x axis, either ``linear``,\n ``log``, or ``logicle``. 
`xscale` is ignored in `bins` is an array\n or a list of arrays.\n yscale : str, optional\n Scale of the bins generated for the y axis, either ``linear``,\n ``log``, or ``logicle``. `yscale` is ignored in `bins` is an array\n or a list of arrays.\n sigma : scalar or sequence of scalars, optional\n Standard deviation for Gaussian kernel used by\n `scipy.ndimage.filters.gaussian_filter` to smooth 2D histogram\n into a density.\n full_output : bool, optional\n Flag specifying to return additional outputs. If true, the outputs\n are given as a namedtuple.\n\n Returns\n -------\n gated_data : FCSData or numpy array\n Gated flow cytometry data of the same format as `data`.\n mask : numpy array of bool, only if ``full_output==True``\n Boolean gate mask used to gate data such that ``gated_data =\n data[mask]``.\n contour : list of 2D numpy arrays, only if ``full_output==True``\n List of 2D numpy array(s) of x-y coordinates tracing out\n the edge of the gated region.\n\n Raises\n ------\n ValueError\n If more or less than 2 channels are specified.\n ValueError\n If `data` has less than 2 dimensions or less than 2 events.\n Exception\n If an unrecognized matplotlib Path code is encountered when\n attempting to generate contours.\n\n Notes\n -----\n The algorithm for gating based on density works as follows:\n\n 1) Calculate 2D histogram of `data` in the specified channels.\n 2) Map each event from `data` to its histogram bin (implicitly\n gating out any events which exist outside specified `bins`).\n 3) Use `gate_fraction` to determine number of events to retain\n (rounded up). 
Only events which are not implicitly gated out\n are considered.\n 4) Smooth 2D histogram using a 2D Gaussian filter.\n 5) Normalize smoothed histogram to obtain valid probability mass\n function (PMF).\n 6) Sort bins by probability.\n 7) Accumulate events (starting with events belonging to bin with\n highest probability (\"densest\") and proceeding to events\n belonging to bins with lowest probability) until at least the\n desired number of events is achieved. While the algorithm\n attempts to get as close to `gate_fraction` fraction of events\n as possible, more events may be retained based on how many\n events fall into each histogram bin (since entire bins are\n retained at a time, not individual events).\n\n \"\"\"\n\n # Extract channels in which to gate\n if len(channels) != 2:\n raise ValueError('2 channels should be specified')\n data_ch = data[:,channels]\n if data_ch.ndim == 1:\n data_ch = data_ch.reshape((-1,1))\n\n # Check gating fraction\n if gate_fraction < 0 or gate_fraction > 1:\n raise ValueError('gate fraction should be between 0 and 1, inclusive')\n\n # Check dimensions\n if data_ch.ndim < 2:\n raise ValueError('data should have at least 2 dimensions')\n if data_ch.shape[0] <= 1:\n raise ValueError('data should have more than one event')\n\n # Build output namedtuple if necessary\n if full_output:\n Density2dGateOutput = collections.namedtuple(\n 'Density2dGateOutput',\n ['gated_data', 'mask', 'contour'])\n\n # If ``data_ch.hist_bins()`` exists, obtain bin edges from it if\n # necessary.\n if hasattr(data_ch, 'hist_bins') and \\\n hasattr(data_ch.hist_bins, '__call__'):\n # Check whether `bins` contains information for one or two axes\n if hasattr(bins, '__iter__') and len(bins)==2:\n # `bins` contains separate information for both axes\n # If bins for the X axis is not an iterable, get bin edges from\n # ``data_ch.hist_bins()``.\n if not hasattr(bins[0], '__iter__'):\n bins[0] = data_ch.hist_bins(channels=0,\n nbins=bins[0],\n scale=xscale)\n # 
If bins for the Y axis is not an iterable, get bin edges from\n # ``data_ch.hist_bins()``.\n if not hasattr(bins[1], '__iter__'):\n bins[1] = data_ch.hist_bins(channels=1,\n nbins=bins[1],\n scale=yscale)\n else:\n # `bins` contains information for one axis, which will be used\n # twice.\n # If bins is not an iterable, get bin edges from\n # ``data_ch.hist_bins()``.\n if not hasattr(bins, '__iter__'):\n bins = [data_ch.hist_bins(channels=0,\n nbins=bins,\n scale=xscale),\n data_ch.hist_bins(channels=1,\n nbins=bins,\n scale=yscale)]\n\n # Make 2D histogram\n H,xe,ye = np.histogram2d(data_ch[:,0], data_ch[:,1], bins=bins)\n\n # Map each event to its histogram bin by sorting events into a 2D array of\n # lists which mimics the histogram.\n #\n # Use np.digitize to calculate the histogram bin index for each event\n # given the histogram bin edges. Note that the index returned by\n # np.digitize is such that bins[i-1] <= x < bins[i], whereas indexing the\n # histogram will result in the following: hist[i,j] = bin corresponding to\n # xedges[i] <= x < xedges[i+1] and yedges[i] <= y < yedges[i+1].\n # Therefore, we need to subtract 1 from the np.digitize result to be able\n # to index into the appropriate bin in the histogram.\n event_indices = np.arange(data_ch.shape[0])\n x_bin_indices = np.digitize(data_ch[:,0], bins=xe) - 1\n y_bin_indices = np.digitize(data_ch[:,1], bins=ye) - 1\n\n # In the current version of numpy, there exists a disparity in how\n # np.histogram and np.digitize treat the rightmost bin edge (np.digitize\n # is not the strict inverse of np.histogram). Specifically, np.histogram\n # treats the rightmost bin interval as fully closed (rightmost bin edge is\n # included in rightmost bin), whereas np.digitize treats all bins as\n # half-open (you can specify which side is closed and which side is open;\n # `right` parameter). 
The expected behavior for this gating function is to\n # mimic np.histogram behavior, so we must reconcile this disparity.\n x_bin_indices[data_ch[:,0] == xe[-1]] = len(xe)-2\n y_bin_indices[data_ch[:,1] == ye[-1]] = len(ye)-2\n\n # Ignore (gate out) events which exist outside specified bins.\n # `np.digitize()-1` will assign events less than `bins` to bin \"-1\" and\n # events greater than `bins` to len(bins)-1.\n outlier_mask = (\n (x_bin_indices == -1) |\n (x_bin_indices == len(xe)-1) |\n (y_bin_indices == -1) |\n (y_bin_indices == len(ye)-1))\n\n event_indices = event_indices[~outlier_mask]\n x_bin_indices = x_bin_indices[~outlier_mask]\n y_bin_indices = y_bin_indices[~outlier_mask]\n\n # Create a 2D array of lists mimicking the histogram to accumulate events\n # associated with each bin.\n filler = np.frompyfunc(lambda x: list(), 1, 1)\n H_events = np.empty_like(H, dtype=np.object)\n filler(H_events, H_events)\n\n for event_idx, x_bin_idx, y_bin_idx in \\\n zip(event_indices, x_bin_indices, y_bin_indices):\n H_events[x_bin_idx, y_bin_idx].append(event_idx)\n\n # Determine number of events to keep. Only consider events which have not\n # been thrown out as outliers.\n n = int(np.ceil(gate_fraction*float(len(event_indices))))\n\n # n = 0 edge case (e.g. 
if gate_fraction = 0.0); incorrectly handled below\n if n == 0:\n mask = np.zeros(shape=data_ch.shape[0], dtype=bool)\n gated_data = data[mask]\n if full_output:\n return Density2dGateOutput(\n gated_data=gated_data, mask=mask, contour=[])\n else:\n return gated_data\n\n # Smooth 2D histogram\n sH = scipy.ndimage.filters.gaussian_filter(\n H,\n sigma=sigma,\n order=0,\n mode='constant',\n cval=0.0,\n truncate=6.0)\n\n # Normalize smoothed histogram to make it a valid probability mass function\n D = sH / np.sum(sH)\n\n # Sort bins by density\n vD = D.ravel()\n vH = H.ravel()\n sidx = np.argsort(vD)[::-1]\n svH = vH[sidx] # linearized counts array sorted by density\n\n # Find minimum number of accepted bins needed to reach specified number\n # of events\n csvH = np.cumsum(svH)\n Nidx = np.nonzero(csvH >= n)[0][0] # we want to include this index\n\n # Get indices of events to keep\n vH_events = H_events.ravel()\n accepted_indices = vH_events[sidx[:(Nidx+1)]]\n accepted_indices = np.array([item # flatten list of lists\n for sublist in accepted_indices\n for item in sublist])\n accepted_indices = np.sort(accepted_indices)\n\n # Convert list of accepted indices to boolean mask array\n mask = np.zeros(shape=data.shape[0], dtype=bool)\n mask[accepted_indices] = True\n\n gated_data = data[mask]\n\n if full_output:\n # Use scikit-image to find the contour of the gated region\n #\n # To find the contour of the gated region, values in the 2D probability\n # mass function ``D`` are used to trace contours at the level of the\n # probability associated with the last accepted bin, ``vD[sidx[Nidx]]``.\n\n # find_contours() specifies contours as collections of row and column\n # indices into the density matrix. The row or column index may be\n # interpolated (i.e. 
non-integer) for greater precision.\n contours_ij = skimage.measure.find_contours(D, vD[sidx[Nidx]])\n\n # Map contours from indices into density matrix to histogram x and y\n # coordinate spaces (assume values in the density matrix are associated\n # with histogram bin centers).\n xc = (xe[:-1] + xe[1:]) / 2.0 # x-axis bin centers\n yc = (ye[:-1] + ye[1:]) / 2.0 # y-axis bin centers\n\n contours = [np.array([np.interp(contour_ij[:,0],\n np.arange(len(xc)),\n xc),\n np.interp(contour_ij[:,1],\n np.arange(len(yc)),\n yc)]).T\n for contour_ij in contours_ij]\n\n return Density2dGateOutput(\n gated_data=gated_data, mask=mask, contour=contours)\n else:\n return gated_data", "response": "Generates a sequence of states that preserves events in the region with highest density for the two specified channels."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\napplies some transformation function to flow cytometry data. This function is a template transformation function, intended to be used by other specific transformation functions. It performs basic checks on `channels` and `data`. It then applies `transform_fxn` to the specified channels. Finally, it rescales ``data.range`` and if necessary. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int, str, list of int, list of str, optional Channels on which to perform the transformation. If `channels` is None, use def_channels. transform_fxn : function Function that performs the actual transformation. def_channels : int, str, list of int, list of str, optional Default set of channels in which to perform the transformation. If `def_channels` is None, use all channels. 
Returns ------- data_t : FCSData or numpy array NxD transformed flow cytometry data.", "response": "def transform(data, channels, transform_fxn, def_channels = None):\n \"\"\"\n Apply some transformation function to flow cytometry data.\n\n This function is a template transformation function, intended to be\n used by other specific transformation functions. It performs basic\n checks on `channels` and `data`. It then applies `transform_fxn` to the\n specified channels. Finally, it rescales ``data.range`` and if\n necessary.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : int, str, list of int, list of str, optional\n Channels on which to perform the transformation. If `channels` is\n None, use def_channels.\n transform_fxn : function\n Function that performs the actual transformation.\n def_channels : int, str, list of int, list of str, optional\n Default set of channels in which to perform the transformation.\n If `def_channels` is None, use all channels.\n\n Returns\n -------\n data_t : FCSData or numpy array\n NxD transformed flow cytometry data.\n\n \"\"\"\n # Copy data array\n data_t = data.copy().astype(np.float64)\n\n # Default\n if channels is None:\n if def_channels is None:\n channels = range(data_t.shape[1])\n else:\n channels = def_channels\n\n # Convert channels to iterable\n if not (hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types)):\n channels = [channels]\n\n # Apply transformation\n data_t[:,channels] = transform_fxn(data_t[:,channels])\n\n # Apply transformation to ``data.range``\n if hasattr(data_t, '_range'):\n for channel in channels:\n # Transform channel name to index if necessary\n channel_idx = data_t._name_to_index(channel)\n if data_t._range[channel_idx] is not None:\n data_t._range[channel_idx] = \\\n transform_fxn(data_t._range[channel_idx])\n\n return data_t"} {"SOURCE": 
"codesearchnet", "instruction": "Can you write a function in Python 3 where it\ntransforms the flow cytometry data to Relative Fluorescence Units RFI.", "response": "def to_rfi(data,\n channels=None,\n amplification_type=None,\n amplifier_gain=None,\n resolution=None):\n \"\"\"\n Transform flow cytometry data to Relative Fluorescence Units (RFI).\n\n If ``amplification_type[0]`` is different from zero, data has been\n taken using a log amplifier. Therefore, to transform to RFI, the\n following operation is applied::\n\n y = a[1]*10^(a[0] * (x/r))\n\n Where ``x`` and ``y`` are the original and transformed data,\n respectively; ``a`` is `amplification_type` argument, and ``r`` is\n `resolution`. This will transform flow cytometry data taken with a log\n amplifier and an ADC of range ``r`` to linear RFIs, such\n that it covers ``a[0]`` decades of signal with a minimum value of\n ``a[1]``.\n\n If ``amplification_type[0]==0``, however, a linear amplifier has been\n used and the following operation is applied instead::\n\n y = x/g\n\n Where ``g`` is `amplifier_gain`. This will transform flow cytometry\n data taken with a linear amplifier of gain ``g`` back to RFIs.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : int, str, list of int, list of str, optional\n Channels on which to perform the transformation. If `channels` is\n None, perform transformation in all channels.\n amplification_type : tuple or list of tuple\n The amplification type of the specified channel(s). This should be\n reported as a tuple, in which the first element indicates how many\n decades the logarithmic amplifier covers, and the second indicates\n the linear value that corresponds to a channel value of zero. If\n the first element is zero, the amplification type is linear. This\n is similar to the $PnE keyword from the FCS standard. 
If None, take\n `amplification_type` from ``data.amplification_type(channel)``.\n amplifier_gain : float or list of floats, optional\n The linear amplifier gain of the specified channel(s). Only used if\n ``amplification_type[0]==0`` (linear amplifier). If None,\n take `amplifier_gain` from ``data.amplifier_gain(channel)``. If\n `data` does not contain ``amplifier_gain()``, use 1.0.\n resolution : int, float, or list of int or float, optional\n Maximum range, for each specified channel. Only needed if\n ``amplification_type[0]!=0`` (log amplifier). If None, take\n `resolution` from ``len(data.domain(channel))``.\n\n Returns\n -------\n FCSData or numpy array\n NxD transformed flow cytometry data.\n\n \"\"\"\n # Default: all channels\n if channels is None:\n channels = range(data.shape[1])\n\n if not (hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types)):\n # If channels is not an iterable, convert it, along with resolution,\n # amplification_type, and amplifier_gain.\n channels = [channels]\n amplification_type = [amplification_type]\n amplifier_gain = [amplifier_gain]\n resolution = [resolution]\n else:\n # If channels is an iterable, check that the other attributes are either\n # None, or iterables of the same length.\n\n if amplification_type is None:\n # If None, propagate None for all channels\n amplification_type = [None]*len(channels)\n elif hasattr(amplification_type, '__iter__'):\n # If it's a list, it should be the same length as channels\n if len(amplification_type) != len(channels):\n raise ValueError(\"channels and amplification_type should have \"\n \"the same length\")\n else:\n # If it's not a list or None, raise error\n raise ValueError(\"channels and amplification_type should have the \"\n \"same length\")\n\n if amplifier_gain is None:\n # If None, propagate None for all channels\n amplifier_gain = [None]*len(channels)\n elif hasattr(amplifier_gain, '__iter__'):\n # If it's a list, it should be the same length as 
channels\n if len(amplifier_gain) != len(channels):\n raise ValueError(\"channels and amplifier_gain should have \"\n \"the same length\")\n else:\n # If it's not a list or None, raise error\n raise ValueError(\"channels and amplifier_gain should have the \"\n \"same length\")\n\n if resolution is None:\n # If None, propagate None for all channels\n resolution = [None]*len(channels)\n elif hasattr(resolution, '__iter__'):\n # If it's a list, it should be the same length as channels\n if len(resolution) != len(channels):\n raise ValueError(\"channels and resolution should have \"\n \"the same length\")\n else:\n # If it's not a list or None, raise error\n raise ValueError(\"channels and resolution should have the \"\n \"same length\")\n\n # Convert channels to integers\n if hasattr(data, '_name_to_index'):\n channels = data._name_to_index(channels)\n else:\n channels = channels\n\n # Copy data array\n data_t = data.copy().astype(np.float64)\n\n # Iterate over channels\n for channel, r, at, ag in \\\n zip(channels, resolution, amplification_type, amplifier_gain):\n # If amplification type is None, try to obtain from data\n if at is None:\n if hasattr(data, 'amplification_type'):\n at = data.amplification_type(channel)\n else:\n raise ValueError('amplification_type should be specified')\n # Define transformation, depending on at[0]\n if at[0]==0:\n # Linear amplifier\n # If no amplifier gain has been specified, try to obtain from data,\n # otherwise assume one\n if ag is None:\n if hasattr(data, 'amplifier_gain') and \\\n hasattr(data.amplifier_gain, '__call__'):\n ag = data.amplifier_gain(channel)\n # If the linear gain has not been specified, it should be\n # assumed to be one.\n if ag is None:\n ag = 1.\n else:\n ag = 1.\n tf = lambda x: x/ag\n else:\n # Log amplifier\n # If no range has been specified, try to obtain from data.\n if r is None:\n if hasattr(data, 'resolution'):\n r = data.resolution(channel)\n else:\n raise ValueError('range should be specified')\n 
tf = lambda x: at[1] * 10**(at[0]/float(r) * x)\n # Apply transformation to event list\n data_t[:,channel] = tf(data_t[:,channel])\n # Apply transformation to range\n if hasattr(data_t, '_range') and data_t._range[channel] is not None:\n data_t._range[channel] = [tf(data_t._range[channel][0]),\n tf(data_t._range[channel][1])]\n\n return data_t"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ntransform flow cytometry data using a standard curve function.", "response": "def to_mef(data, channels, sc_list, sc_channels = None):\n \"\"\"\n Transform flow cytometry data using a standard curve function.\n\n This function accepts a list of standard curves (`sc_list`) and a list\n of channels to which those standard curves should be applied\n (`sc_channels`). `to_mef` automatically checks whether a standard curve\n is available for each channel specified in `channels`, and throws an\n error otherwise.\n\n This function is intended to be reduced to the following signature::\n\n to_mef_reduced(data, channels)\n\n by using ``functools.partial`` once a list of standard curves and their\n respective channels is available.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : int, str, list of int, list of str\n Channels on which to perform the transformation. If `channels` is\n None, perform transformation in all channels specified on\n `sc_channels`.\n sc_list : list of functions\n Functions implementing the standard curves for each channel in\n `sc_channels`.\n sc_channels : list of int or list of str, optional\n List of channels corresponding to each function in `sc_list`. 
If\n None, use all channels in `data`.\n\n Returns\n -------\n FCSData or numpy array\n NxD transformed flow cytometry data.\n\n Raises\n ------\n ValueError\n If any channel specified in `channels` is not in `sc_channels`.\n\n \"\"\"\n # Default sc_channels\n if sc_channels is None:\n if data.ndim == 1:\n sc_channels = range(data.shape[0])\n else:\n sc_channels = range(data.shape[1])\n # Check that sc_channels and sc_list have the same length\n if len(sc_channels) != len(sc_list):\n raise ValueError(\"sc_channels and sc_list should have the same length\")\n # Convert sc_channels to indices\n if hasattr(data, '_name_to_index'):\n sc_channels = data._name_to_index(sc_channels)\n\n # Default channels\n if channels is None:\n channels = sc_channels\n # Convert channels to iterable\n if not (hasattr(channels, '__iter__') \\\n and not isinstance(channels, six.string_types)):\n channels = [channels]\n # Convert channels to index\n if hasattr(data, '_name_to_index'):\n channels_ind = data._name_to_index(channels)\n else:\n channels_ind = channels\n # Check if every channel is in sc_channels\n for chi, chs in zip(channels_ind, channels):\n if chi not in sc_channels:\n raise ValueError(\"no standard curve for channel {}\".format(chs))\n\n # Copy data array\n data_t = data.copy().astype(np.float64)\n\n # Iterate over channels\n for chi, sc in zip(sc_channels, sc_list):\n if chi not in channels_ind:\n continue\n # Apply transformation\n data_t[:,chi] = sc(data_t[:,chi])\n # Apply transformation to range\n if hasattr(data_t, '_range') and data_t._range[chi] is not None:\n data_t._range[chi] = [sc(data_t._range[chi][0]),\n sc(data_t._range[chi][1])]\n\n return data_t"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef hist1d(data_list,\n channel=0,\n xscale='logicle',\n bins=256,\n histtype='stepfilled',\n normed_area=False,\n normed_height=False,\n xlabel=None,\n ylabel=None,\n xlim=None,\n ylim=None,\n title=None,\n legend=False,\n 
legend_loc='best',\n legend_fontsize='medium',\n legend_labels=None,\n facecolor=None,\n edgecolor=None,\n savefig=None,\n **kwargs):\n \"\"\"\n Plot one 1D histogram from one or more flow cytometry data sets.\n\n Parameters\n ----------\n data_list : FCSData or numpy array or list of FCSData or numpy array\n Flow cytometry data to plot.\n channel : int or str, optional\n Channel from where to take the events to plot. If ndim == 1,\n channel is ignored. String channel specifications are only\n supported for data types which support string-based indexing\n (e.g. FCSData).\n xscale : str, optional\n Scale of the x axis, either ``linear``, ``log``, or ``logicle``.\n bins : int or array_like, optional\n If `bins` is an integer, it specifies the number of bins to use.\n If `bins` is an array, it specifies the bin edges to use. If `bins`\n is None or an integer, `hist1d` will attempt to use\n ``data.hist_bins`` to generate the bins automatically.\n histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, str, optional\n Histogram type. Directly passed to ``plt.hist``.\n normed_area : bool, optional\n Flag indicating whether to normalize the histogram such that the\n area under the curve is equal to one. The resulting plot is\n equivalent to a probability density function.\n normed_height : bool, optional\n Flag indicating whether to normalize the histogram such that the\n sum of all bins' heights is equal to one. The resulting plot is\n equivalent to a probability mass function. `normed_height` is\n ignored if `normed_area` is True.\n savefig : str, optional\n The name of the file to save the figure to. If None, do not save.\n\n Other parameters\n ----------------\n xlabel : str, optional\n Label to use on the x axis. If None, attempts to extract channel\n name from last data object.\n ylabel : str, optional\n Label to use on the y axis. If None and ``normed_area==True``, use\n 'Probability'. 
If None, ``normed_area==False``, and\n ``normed_height==True``, use 'Counts (normalized)'. If None,\n ``normed_area==False``, and ``normed_height==False``, use 'Counts'.\n xlim : tuple, optional\n Limits for the x axis. If not specified and `bins` exists, use\n the lowest and highest values of `bins`.\n ylim : tuple, optional\n Limits for the y axis.\n title : str, optional\n Plot title.\n legend : bool, optional\n Flag specifying whether to include a legend. If `legend` is True,\n the legend labels will be taken from `legend_labels` if present,\n else they will be taken from ``str(data_list[i])``.\n legend_loc : str, optional\n Location of the legend.\n legend_fontsize : int or str, optional\n Font size for the legend.\n legend_labels : list, optional\n Labels to use for the legend.\n facecolor : matplotlib color or list of matplotlib colors, optional\n The histogram's facecolor. It can be a list with the same length as\n `data_list`. If `edgecolor` and `facecolor` are not specified, and\n ``histtype == 'stepfilled'``, the facecolor will be taken from the\n module-level variable `cmap_default`.\n edgecolor : matplotlib color or list of matplotlib colors, optional\n The histogram's edgecolor. It can be a list with the same length as\n `data_list`. If `edgecolor` and `facecolor` are not specified, and\n ``histtype == 'step'``, the edgecolor will be taken from the\n module-level variable `cmap_default`.\n kwargs : dict, optional\n Additional parameters passed directly to matploblib's ``hist``.\n\n Notes\n -----\n `hist1d` calls matplotlib's ``hist`` function for each object in\n `data_list`. `hist_type`, the type of histogram to draw, is directly\n passed to ``plt.hist``. Additional keyword arguments provided to\n `hist1d` are passed directly to ``plt.hist``.\n\n If `normed_area` is set to True, `hist1d` calls ``plt.hist`` with\n ``density`` (or ``normed``, if matplotlib's version is older than\n 2.2.0) set to True. 
There is a bug in matplotlib 2.1.0 that\n produces an incorrect plot in these conditions. We do not recommend\n using matplotlib 2.1.0 if `normed_area` is expected to be used.\n\n \"\"\"\n # Using `normed_area` with matplotlib 2.1.0 causes an incorrect plot to be\n # produced. Raise warning in these conditions.\n if normed_area and packaging.version.parse(matplotlib.__version__) \\\n == packaging.version.parse('2.1.0'):\n warnings.warn(\"bug in matplotlib 2.1.0 will result in an incorrect plot\"\n \" when normed_area is set to True\")\n\n # Convert to list if necessary\n if not isinstance(data_list, list):\n data_list = [data_list]\n\n # Default colors\n if histtype == 'stepfilled':\n if facecolor is None:\n facecolor = [cmap_default(i)\n for i in np.linspace(0, 1, len(data_list))]\n if edgecolor is None:\n edgecolor = ['black']*len(data_list)\n elif histtype == 'step':\n if edgecolor is None:\n edgecolor = [cmap_default(i)\n for i in np.linspace(0, 1, len(data_list))]\n\n # Convert colors to lists if necessary\n if not isinstance(edgecolor, list):\n edgecolor = [edgecolor]*len(data_list)\n if not isinstance(facecolor, list):\n facecolor = [facecolor]*len(data_list)\n\n # Collect scale parameters that depend on all elements in data_list\n xscale_kwargs = {}\n if xscale=='logicle':\n t = _LogicleTransform(data=data_list, channel=channel)\n xscale_kwargs['T'] = t.T\n xscale_kwargs['M'] = t.M\n xscale_kwargs['W'] = t.W\n\n # Iterate through data_list\n for i, data in enumerate(data_list):\n # Extract channel\n if data.ndim > 1:\n y = data[:, channel]\n else:\n y = data\n\n # If ``data_plot.hist_bins()`` exists, obtain bin edges from it if\n # necessary. 
If it does not exist, do not modify ``bins``.\n if hasattr(y, 'hist_bins') and hasattr(y.hist_bins, '__call__'):\n # If bins is None or an integer, get bin edges from\n # ``data_plot.hist_bins()``.\n if bins is None or isinstance(bins, int):\n bins = y.hist_bins(channels=0,\n nbins=bins,\n scale=xscale,\n **xscale_kwargs)\n\n # Decide whether to normalize\n if normed_height and not normed_area:\n weights = np.ones_like(y)/float(len(y))\n else:\n weights = None\n\n # Actually plot\n if packaging.version.parse(matplotlib.__version__) \\\n >= packaging.version.parse('2.2'):\n if bins is not None:\n n, edges, patches = plt.hist(y,\n bins,\n weights=weights,\n density=normed_area,\n histtype=histtype,\n edgecolor=edgecolor[i],\n facecolor=facecolor[i],\n **kwargs)\n else:\n n, edges, patches = plt.hist(y,\n weights=weights,\n density=normed_area,\n histtype=histtype,\n edgecolor=edgecolor[i],\n facecolor=facecolor[i],\n **kwargs)\n else:\n if bins is not None:\n n, edges, patches = plt.hist(y,\n bins,\n weights=weights,\n normed=normed_area,\n histtype=histtype,\n edgecolor=edgecolor[i],\n facecolor=facecolor[i],\n **kwargs)\n else:\n n, edges, patches = plt.hist(y,\n weights=weights,\n normed=normed_area,\n histtype=histtype,\n edgecolor=edgecolor[i],\n facecolor=facecolor[i],\n **kwargs)\n\n # Set scale of x axis\n if xscale=='logicle':\n plt.gca().set_xscale(xscale, data=data_list, channel=channel)\n else:\n plt.gca().set_xscale(xscale)\n\n ###\n # Final configuration\n ###\n\n # x and y labels\n if xlabel is not None:\n # Highest priority is user-provided label\n plt.xlabel(xlabel)\n elif hasattr(y, 'channels'):\n # Attempt to use channel name\n plt.xlabel(y.channels[0])\n\n if ylabel is not None:\n # Highest priority is user-provided label\n plt.ylabel(ylabel)\n elif normed_area:\n plt.ylabel('Probability')\n elif normed_height:\n plt.ylabel('Counts (normalized)')\n else:\n # Default is \"Counts\"\n plt.ylabel('Counts')\n\n # x and y limits\n if xlim is not None:\n 
# Highest priority is user-provided limits\n plt.xlim(xlim)\n elif bins is not None:\n # Use bins if specified\n plt.xlim((edges[0], edges[-1]))\n\n if ylim is not None:\n plt.ylim(ylim)\n\n # Title\n if title is not None:\n plt.title(title)\n\n # Legend\n if legend:\n if legend_labels is None:\n legend_labels = [str(data) for data in data_list]\n plt.legend(legend_labels,\n loc=legend_loc,\n prop={'size': legend_fontsize})\n\n # Save if necessary\n if savefig is not None:\n plt.tight_layout()\n plt.savefig(savefig, dpi=savefig_dpi)\n plt.close()", "response": "Plots one 1D histogram from one or more flow cytometry data sets."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef density2d(data, \n channels=[0,1],\n bins=1024,\n mode='mesh',\n normed=False,\n smooth=True,\n sigma=10.0,\n colorbar=False,\n xscale='logicle',\n yscale='logicle',\n xlabel=None,\n ylabel=None,\n xlim=None,\n ylim=None,\n title=None,\n savefig=None,\n **kwargs):\n \"\"\"\n Plot a 2D density plot from two channels of a flow cytometry data set.\n\n `density2d` has two plotting modes which are selected using the `mode`\n argument. With ``mode=='mesh'``, this function plots the data as a true\n 2D histogram, in which a plane is divided into bins and the color of\n each bin is directly related to the number of elements therein. With\n ``mode=='scatter'``, this function also calculates a 2D histogram,\n but it plots a 2D scatter plot in which each dot corresponds to a bin,\n colored according to the number elements therein. The most important\n difference is that the ``scatter`` mode does not color regions\n corresponding to empty bins. This allows for easy identification of\n regions with low number of events. For both modes, the calculated\n histogram can be smoothed using a Gaussian kernel by specifying\n ``smooth=True``. 
The width of the kernel is, in this case, given by\n `sigma`.\n\n Parameters\n ----------\n data : FCSData or numpy array\n Flow cytometry data to plot.\n channels : list of int, list of str, optional\n Two channels to use for the plot.\n bins : int or array_like or [int, int] or [array, array], optional\n Bins used for plotting:\n\n - If None, use ``data.hist_bins`` to obtain bin edges for both\n axes. None is not allowed if ``data.hist_bins`` is not\n available.\n - If int, `bins` specifies the number of bins to use for both\n axes. If ``data.hist_bins`` exists, it will be used to generate\n a number `bins` of bins.\n - If array_like, `bins` directly specifies the bin edges to use\n for both axes.\n - If [int, int], each element of `bins` specifies the number of\n bins for each axis. If ``data.hist_bins`` exists, use it to\n generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.\n - If [array, array], each element of `bins` directly specifies\n the bin edges to use for each axis.\n - Any combination of the above, such as [int, array], [None,\n int], or [array, int]. In this case, None indicates to generate\n bin edges using ``data.hist_bins`` as above, int indicates the\n number of bins to generate, and an array directly indicates the\n bin edges. Note that None is not allowed if ``data.hist_bins``\n does not exist.\n mode : {'mesh', 'scatter'}, str, optional\n Plotting mode. 'mesh' produces a 2D-histogram whereas 'scatter'\n produces a scatterplot colored by histogram bin value.\n normed : bool, optional\n Flag indicating whether to plot a normed histogram (probability\n mass function instead of a counts-based histogram).\n smooth : bool, optional\n Flag indicating whether to apply Gaussian smoothing to the\n histogram.\n colorbar : bool, optional\n Flag indicating whether to add a colorbar to the plot.\n savefig : str, optional\n The name of the file to save the figure to. 
If None, do not save.\n\n Other parameters\n ----------------\n sigma : float, optional\n The sigma parameter for the Gaussian kernel to use when smoothing.\n xscale : str, optional\n Scale of the x axis, either ``linear``, ``log``, or ``logicle``.\n yscale : str, optional\n Scale of the y axis, either ``linear``, ``log``, or ``logicle``\n xlabel : str, optional\n Label to use on the x axis. If None, attempts to extract channel\n name from `data`.\n ylabel : str, optional\n Label to use on the y axis. If None, attempts to extract channel\n name from `data`.\n xlim : tuple, optional\n Limits for the x axis. If not specified and `bins` exists, use\n the lowest and highest values of `bins`.\n ylim : tuple, optional\n Limits for the y axis. If not specified and `bins` exists, use\n the lowest and highest values of `bins`.\n title : str, optional\n Plot title.\n kwargs : dict, optional\n Additional parameters passed directly to the underlying matplotlib\n functions: ``plt.scatter`` if ``mode==scatter``, and\n ``plt.pcolormesh`` if ``mode==mesh``.\n\n \"\"\"\n # Extract channels to plot\n if len(channels) != 2:\n raise ValueError('two channels need to be specified')\n data_plot = data[:, channels]\n\n # If ``data_plot.hist_bins()`` exists, obtain bin edges from it if\n # necessary.\n if hasattr(data_plot, 'hist_bins') and \\\n hasattr(data_plot.hist_bins, '__call__'):\n # Check whether `bins` contains information for one or two axes\n if hasattr(bins, '__iter__') and len(bins)==2:\n # `bins` contains separate information for both axes\n # If bins for the X axis is not an iterable, get bin edges from\n # ``data_plot.hist_bins()``.\n if not hasattr(bins[0], '__iter__'):\n bins[0] = data_plot.hist_bins(channels=0,\n nbins=bins[0],\n scale=xscale)\n # If bins for the Y axis is not an iterable, get bin edges from\n # ``data_plot.hist_bins()``.\n if not hasattr(bins[1], '__iter__'):\n bins[1] = data_plot.hist_bins(channels=1,\n nbins=bins[1],\n scale=yscale)\n else:\n # `bins` 
contains information for one axis, which will be used\n # twice.\n # If bins is not an iterable, get bin edges from\n # ``data_plot.hist_bins()``.\n if not hasattr(bins, '__iter__'):\n bins = [data_plot.hist_bins(channels=0,\n nbins=bins,\n scale=xscale),\n data_plot.hist_bins(channels=1,\n nbins=bins,\n scale=yscale)]\n\n else:\n # Check if ``bins`` is None and raise error\n if bins is None:\n raise ValueError(\"bins should be specified\")\n\n # If colormap is not specified, use the default of this module\n if 'cmap' not in kwargs:\n kwargs['cmap'] = cmap_default\n\n # Calculate histogram\n H,xe,ye = np.histogram2d(data_plot[:,0], data_plot[:,1], bins=bins)\n\n # Smooth \n if smooth:\n sH = scipy.ndimage.filters.gaussian_filter(\n H,\n sigma=sigma,\n order=0,\n mode='constant',\n cval=0.0)\n else:\n sH = None\n\n # Normalize\n if normed:\n H = H / np.sum(H)\n sH = sH / np.sum(sH) if sH is not None else None\n\n ###\n # Plot\n ###\n\n # numpy histograms are organized such that the 1st dimension (eg. FSC) =\n # rows (1st index) and the 2nd dimension (eg. SSC) = columns (2nd index).\n # Visualized as is, this results in x-axis = SSC and y-axis = FSC, which\n # is not what we're used to. 
Transpose the histogram array to fix the\n # axes.\n H = H.T\n sH = sH.T if sH is not None else None\n\n if mode == 'scatter':\n Hind = np.ravel(H)\n xc = (xe[:-1] + xe[1:]) / 2.0 # x-axis bin centers\n yc = (ye[:-1] + ye[1:]) / 2.0 # y-axis bin centers\n xv, yv = np.meshgrid(xc, yc)\n x = np.ravel(xv)[Hind != 0]\n y = np.ravel(yv)[Hind != 0]\n z = np.ravel(H if sH is None else sH)[Hind != 0]\n plt.scatter(x, y, s=1.5, edgecolor='none', c=z, **kwargs)\n elif mode == 'mesh':\n plt.pcolormesh(xe, ye, H if sH is None else sH, **kwargs)\n else:\n raise ValueError(\"mode {} not recognized\".format(mode))\n\n if colorbar:\n cbar = plt.colorbar()\n if normed:\n cbar.ax.set_ylabel('Probability')\n else:\n cbar.ax.set_ylabel('Counts')\n\n # Set scale of axes\n if xscale=='logicle':\n plt.gca().set_xscale(xscale, data=data_plot, channel=0)\n else:\n plt.gca().set_xscale(xscale)\n if yscale=='logicle':\n plt.gca().set_yscale(yscale, data=data_plot, channel=1)\n else:\n plt.gca().set_yscale(yscale)\n\n # x and y limits\n if xlim is not None:\n # Highest priority is user-provided limits\n plt.xlim(xlim)\n else:\n # Use histogram edges\n plt.xlim((xe[0], xe[-1]))\n\n if ylim is not None:\n # Highest priority is user-provided limits\n plt.ylim(ylim)\n else:\n # Use histogram edges\n plt.ylim((ye[0], ye[-1]))\n\n # x and y labels\n if xlabel is not None:\n # Highest priority is user-provided label\n plt.xlabel(xlabel)\n elif hasattr(data_plot, 'channels'):\n # Attempt to use channel name\n plt.xlabel(data_plot.channels[0])\n\n if ylabel is not None:\n # Highest priority is user-provided label\n plt.ylabel(ylabel)\n elif hasattr(data_plot, 'channels'):\n # Attempt to use channel name\n plt.ylabel(data_plot.channels[1])\n\n # title\n if title is not None:\n plt.title(title)\n\n # Save if necessary\n if savefig is not None:\n plt.tight_layout()\n plt.savefig(savefig, dpi=savefig_dpi)\n plt.close()", "response": "This function plots a 2D density plot from two channels of flow 
cytometry data."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngenerating a 2D scatter plot from one or more FCSData objects or numpy arrays.", "response": "def scatter2d(data_list, \n channels=[0,1],\n xscale='logicle',\n yscale='logicle',\n xlabel=None,\n ylabel=None,\n xlim=None,\n ylim=None,\n title=None,\n color=None,\n savefig=None,\n **kwargs):\n \"\"\"\n Plot 2D scatter plot from one or more FCSData objects or numpy arrays.\n\n Parameters\n ----------\n data_list : array or FCSData or list of array or list of FCSData\n Flow cytometry data to plot.\n channels : list of int, list of str\n Two channels to use for the plot.\n savefig : str, optional\n The name of the file to save the figure to. If None, do not save.\n\n Other parameters\n ----------------\n xscale : str, optional\n Scale of the x axis, either ``linear``, ``log``, or ``logicle``.\n yscale : str, optional\n Scale of the y axis, either ``linear``, ``log``, or ``logicle``.\n xlabel : str, optional\n Label to use on the x axis. If None, attempts to extract channel\n name from last data object.\n ylabel : str, optional\n Label to use on the y axis. If None, attempts to extract channel\n name from last data object.\n xlim : tuple, optional\n Limits for the x axis. If None, attempts to extract limits from the\n range of the last data object.\n ylim : tuple, optional\n Limits for the y axis. If None, attempts to extract limits from the\n range of the last data object.\n title : str, optional\n Plot title.\n color : matplotlib color or list of matplotlib colors, optional\n Color for the scatter plot. It can be a list with the same length\n as `data_list`. 
If `color` is not specified, elements from\n `data_list` are plotted with colors taken from the module-level\n variable `cmap_default`.\n kwargs : dict, optional\n Additional parameters passed directly to matploblib's ``scatter``.\n\n Notes\n -----\n `scatter2d` calls matplotlib's ``scatter`` function for each object in\n data_list. Additional keyword arguments provided to `scatter2d` are\n passed directly to ``plt.scatter``.\n\n \"\"\"\n # Check appropriate number of channels\n if len(channels) != 2:\n raise ValueError('two channels need to be specified')\n\n # Convert to list if necessary\n if not isinstance(data_list, list):\n data_list = [data_list]\n\n # Default colors\n if color is None:\n color = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))]\n\n # Convert color to list, if necessary\n if not isinstance(color, list):\n color = [color]*len(data_list)\n\n # Iterate through data_list\n for i, data in enumerate(data_list):\n # Get channels to plot\n data_plot = data[:, channels]\n # Make scatter plot\n plt.scatter(data_plot[:,0],\n data_plot[:,1],\n s=5,\n alpha=0.25,\n color=color[i],\n **kwargs)\n\n # Set labels if specified, else try to extract channel names\n if xlabel is not None:\n plt.xlabel(xlabel)\n elif hasattr(data_plot, 'channels'):\n plt.xlabel(data_plot.channels[0])\n if ylabel is not None:\n plt.ylabel(ylabel)\n elif hasattr(data_plot, 'channels'):\n plt.ylabel(data_plot.channels[1])\n\n # Set scale of axes\n if xscale=='logicle':\n plt.gca().set_xscale(xscale, data=data_list, channel=channels[0])\n else:\n plt.gca().set_xscale(xscale)\n if yscale=='logicle':\n plt.gca().set_yscale(yscale, data=data_list, channel=channels[1])\n else:\n plt.gca().set_yscale(yscale)\n\n # Set plot limits if specified, else extract range from data_list.\n # ``.hist_bins`` with one bin works better for visualization that\n # ``.range``, because it deals with two issues. 
First, it automatically\n # deals with range values that are outside the domain of the current scaling\n # (e.g. when the lower range value is zero and the scaling is logarithmic).\n # Second, it takes into account events that are outside the limits specified\n # by .range (e.g. negative events will be shown with logicle scaling, even\n # when the lower range is zero).\n if xlim is None:\n xlim = [np.inf, -np.inf]\n for data in data_list:\n if hasattr(data, 'hist_bins') and \\\n hasattr(data.hist_bins, '__call__'):\n xlim_data = data.hist_bins(channels=channels[0],\n nbins=1,\n scale=xscale)\n xlim[0] = xlim_data[0] if xlim_data[0] < xlim[0] else xlim[0]\n xlim[1] = xlim_data[1] if xlim_data[1] > xlim[1] else xlim[1]\n plt.xlim(xlim)\n if ylim is None:\n ylim = [np.inf, -np.inf]\n for data in data_list:\n if hasattr(data, 'hist_bins') and \\\n hasattr(data.hist_bins, '__call__'):\n ylim_data = data.hist_bins(channels=channels[1],\n nbins=1,\n scale=yscale)\n ylim[0] = ylim_data[0] if ylim_data[0] < ylim[0] else ylim[0]\n ylim[1] = ylim_data[1] if ylim_data[1] > ylim[1] else ylim[1]\n plt.ylim(ylim)\n\n # Title\n if title is not None:\n plt.title(title)\n\n # Save if necessary\n if savefig is not None:\n plt.tight_layout()\n plt.savefig(savefig, dpi=savefig_dpi)\n plt.close()"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nfunctioning to plot a 3D scatter plot from one or more Flow cytometry data objects or numpy arrays.", "response": "def scatter3d(data_list, \n channels=[0,1,2],\n xscale='logicle',\n yscale='logicle',\n zscale='logicle',\n xlabel=None,\n ylabel=None,\n zlabel=None,\n xlim=None,\n ylim=None,\n zlim=None,\n title=None,\n color=None,\n savefig=None,\n **kwargs):\n \"\"\"\n Plot 3D scatter plot from one or more FCSData objects or numpy arrays.\n\n Parameters\n ----------\n data_list : array or FCSData or list of array or list of FCSData\n Flow cytometry data to plot.\n channels : list of int, list of str\n Three channels 
to use for the plot.\n savefig : str, optional\n The name of the file to save the figure to. If None, do not save.\n\n Other parameters\n ----------------\n xscale : str, optional\n Scale of the x axis, either ``linear``, ``log``, or ``logicle``.\n yscale : str, optional\n Scale of the y axis, either ``linear``, ``log``, or ``logicle``.\n zscale : str, optional\n Scale of the z axis, either ``linear``, ``log``, or ``logicle``.\n xlabel : str, optional\n Label to use on the x axis. If None, attempts to extract channel\n name from last data object.\n ylabel : str, optional\n Label to use on the y axis. If None, attempts to extract channel\n name from last data object.\n zlabel : str, optional\n Label to use on the z axis. If None, attempts to extract channel\n name from last data object.\n xlim : tuple, optional\n Limits for the x axis. If None, attempts to extract limits from the\n range of the last data object.\n ylim : tuple, optional\n Limits for the y axis. If None, attempts to extract limits from the\n range of the last data object.\n zlim : tuple, optional\n Limits for the z axis. If None, attempts to extract limits from the\n range of the last data object.\n title : str, optional\n Plot title.\n color : matplotlib color or list of matplotlib colors, optional\n Color for the scatter plot. It can be a list with the same length\n as `data_list`. 
If `color` is not specified, elements from\n `data_list` are plotted with colors taken from the module-level\n variable `cmap_default`.\n kwargs : dict, optional\n Additional parameters passed directly to matploblib's ``scatter``.\n\n Notes\n -----\n `scatter3d` uses matplotlib's ``scatter`` with a 3D projection.\n Additional keyword arguments provided to `scatter3d` are passed\n directly to ``scatter``.\n\n \"\"\"\n # Check appropriate number of channels\n if len(channels) != 3:\n raise ValueError('three channels need to be specified')\n\n # Convert to list if necessary\n if not isinstance(data_list, list):\n data_list = [data_list]\n\n # Default colors\n if color is None:\n color = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))]\n\n # Convert color to list, if necessary\n if not isinstance(color, list):\n color = [color]*len(data_list)\n\n # Get transformation functions for each axis\n # Explicit rescaling is required for non-linear scales because mplot3d does\n # not natively support anything but linear scale.\n if xscale == 'linear':\n xscale_transform = lambda x: x\n elif xscale == 'log':\n xscale_transform = np.log10\n elif xscale == 'logicle':\n t = _LogicleTransform(data=data_list, channel=channels[0])\n it = _InterpolatedInverseTransform(t, 0, t.M)\n xscale_transform = it.transform_non_affine\n else:\n raise ValueError('scale {} not supported'.format(xscale))\n\n if yscale == 'linear':\n yscale_transform = lambda x: x\n elif yscale == 'log':\n yscale_transform = np.log10\n elif yscale == 'logicle':\n t = _LogicleTransform(data=data_list, channel=channels[1])\n it = _InterpolatedInverseTransform(t, 0, t.M)\n yscale_transform = it.transform_non_affine\n else:\n raise ValueError('scale {} not supported'.format(yscale))\n\n if zscale == 'linear':\n zscale_transform = lambda x: x\n elif zscale == 'log':\n zscale_transform = np.log10\n elif zscale == 'logicle':\n t = _LogicleTransform(data=data_list, channel=channels[2])\n it = 
_InterpolatedInverseTransform(t, 0, t.M)\n zscale_transform = it.transform_non_affine\n else:\n raise ValueError('scale {} not supported'.format(zscale))\n\n # Make 3d axis if necessary\n ax_3d = plt.gca(projection='3d')\n\n # Iterate through data_list\n for i, data in enumerate(data_list):\n # Get channels to plot\n data_plot = data[:, channels]\n # Make scatter plot\n ax_3d.scatter(xscale_transform(data_plot[:, 0]),\n yscale_transform(data_plot[:, 1]),\n zscale_transform(data_plot[:, 2]),\n marker='o',\n alpha=0.1,\n color=color[i],\n **kwargs)\n\n # Remove tick labels\n ax_3d.xaxis.set_ticklabels([])\n ax_3d.yaxis.set_ticklabels([])\n ax_3d.zaxis.set_ticklabels([])\n\n # Set labels if specified, else try to extract channel names\n if xlabel is not None:\n ax_3d.set_xlabel(xlabel)\n elif hasattr(data_plot, 'channels'):\n ax_3d.set_xlabel(data_plot.channels[0])\n if ylabel is not None:\n ax_3d.set_ylabel(ylabel)\n elif hasattr(data_plot, 'channels'):\n ax_3d.set_ylabel(data_plot.channels[1])\n if zlabel is not None:\n ax_3d.set_zlabel(zlabel)\n elif hasattr(data_plot, 'channels'):\n ax_3d.set_zlabel(data_plot.channels[2])\n\n # Set plot limits if specified, else extract range from data_plot\n # ``.hist_bins`` with one bin works better for visualization that\n # ``.range``, because it deals with two issues. First, it automatically\n # deals with range values that are outside the domain of the current scaling\n # (e.g. when the lower range value is zero and the scaling is logarithmic).\n # Second, it takes into account events that are outside the limits specified\n # by .range (e.g. 
negative events will be shown with logicle scaling, even\n # when the lower range is zero).\n if xlim is None:\n xlim = np.array([np.inf, -np.inf])\n for data in data_list:\n if hasattr(data, 'hist_bins') and \\\n hasattr(data.hist_bins, '__call__'):\n xlim_data = data.hist_bins(channels=channels[0],\n nbins=1,\n scale=xscale)\n xlim[0] = xlim_data[0] if xlim_data[0] < xlim[0] else xlim[0]\n xlim[1] = xlim_data[1] if xlim_data[1] > xlim[1] else xlim[1]\n xlim = xscale_transform(xlim)\n ax_3d.set_xlim(xlim)\n\n if ylim is None:\n ylim = np.array([np.inf, -np.inf])\n for data in data_list:\n if hasattr(data, 'hist_bins') and \\\n hasattr(data.hist_bins, '__call__'):\n ylim_data = data.hist_bins(channels=channels[1],\n nbins=1,\n scale=yscale)\n ylim[0] = ylim_data[0] if ylim_data[0] < ylim[0] else ylim[0]\n ylim[1] = ylim_data[1] if ylim_data[1] > ylim[1] else ylim[1]\n ylim = yscale_transform(ylim)\n ax_3d.set_ylim(ylim)\n\n if zlim is None:\n zlim = np.array([np.inf, -np.inf])\n for data in data_list:\n if hasattr(data, 'hist_bins') and \\\n hasattr(data.hist_bins, '__call__'):\n zlim_data = data.hist_bins(channels=channels[2],\n nbins=1,\n scale=zscale)\n zlim[0] = zlim_data[0] if zlim_data[0] < zlim[0] else zlim[0]\n zlim[1] = zlim_data[1] if zlim_data[1] > zlim[1] else zlim[1]\n zlim = zscale_transform(zlim)\n ax_3d.set_zlim(zlim)\n\n # Title\n if title is not None:\n plt.title(title)\n\n # Save if necessary\n if savefig is not None:\n plt.tight_layout()\n plt.savefig(savefig, dpi=savefig_dpi)\n plt.close()"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nmaking a combined density/histogram plot of a FCSData object. This function calls `hist1d` and `density2d` to plot a density diagram and a number of histograms in different subplots of the same plot using one single function call. Setting `density_channels` to None will not produce a density diagram, and setting `hist_channels` to None will not produce any histograms. 
Setting both to None will raise an error. Additional parameters can be provided to `density2d` and `hist1d` by using `density_params` and `hist_params`. If `gated_data` is provided, this function will plot the histograms corresponding to `gated_data` on top of `data`'s histograms, with some transparency on `data`. In addition, a legend will be added with the labels 'Ungated' and 'Gated'. If `gate_contour` is provided and it contains a valid list of 2D curves, these will be plotted on top of the density plot. Parameters ---------- data : FCSData object Flow cytometry data object to plot. gated_data : FCSData object, optional Flow cytometry data object. If `gated_data` is specified, the histograms of `data` are plotted with an alpha value of 0.5, and the histograms of `gated_data` are plotted on top of those with an alpha value of 1.0. gate_contour : list, optional List of Nx2 curves, representing a gate contour to be plotted in the density diagram. density_channels : list Two channels to use for the density plot. If `density_channels` is None, do not plot a density plot. density_params : dict, optional Parameters to pass to `density2d`. hist_channels : list Channels to use for each histogram. If `hist_channels` is None, do not plot histograms. hist_params : list, optional List of dictionaries with the parameters to pass to each call of `hist1d`. savefig : str, optional The name of the file to save the figure to. If None, do not save. Other parameters ---------------- figsize : tuple, optional Figure size. If None, calculate a default based on the number of subplots. 
Raises ------ ValueError If both `density_channels` and `hist_channels` are None.", "response": "def density_and_hist(data,\n gated_data=None,\n gate_contour=None,\n density_channels=None,\n density_params={},\n hist_channels=None,\n hist_params={},\n figsize=None,\n savefig=None):\n \"\"\"\n Make a combined density/histogram plot of a FCSData object.\n\n This function calls `hist1d` and `density2d` to plot a density diagram\n and a number of histograms in different subplots of the same plot using\n one single function call. Setting `density_channels` to None will not\n produce a density diagram, and setting `hist_channels` to None will not\n produce any histograms. Setting both to None will raise an error.\n Additional parameters can be provided to `density2d` and `hist1d` by\n using `density_params` and `hist_params`.\n\n If `gated_data` is provided, this function will plot the histograms\n corresponding to `gated_data` on top of `data`'s histograms, with some\n transparency on `data`. In addition, a legend will be added with the\n labels 'Ungated' and 'Gated'. If `gate_contour` is provided and it\n contains a valid list of 2D curves, these will be plotted on top of the\n density plot.\n\n Parameters\n ----------\n data : FCSData object\n Flow cytometry data object to plot.\n gated_data : FCSData object, optional\n Flow cytometry data object. If `gated_data` is specified, the\n histograms of `data` are plotted with an alpha value of 0.5, and\n the histograms of `gated_data` are plotted on top of those with\n an alpha value of 1.0.\n gate_contour : list, optional\n List of Nx2 curves, representing a gate contour to be plotted in\n the density diagram.\n density_channels : list\n Two channels to use for the density plot. If `density_channels` is\n None, do not plot a density plot.\n density_params : dict, optional\n Parameters to pass to `density2d`.\n hist_channels : list\n Channels to use for each histogram. 
If `hist_channels` is None,\n do not plot histograms.\n hist_params : list, optional\n List of dictionaries with the parameters to pass to each call of\n `hist1d`.\n savefig : str, optional\n The name of the file to save the figure to. If None, do not save.\n\n Other parameters\n ----------------\n figsize : tuple, optional\n Figure size. If None, calculate a default based on the number of\n subplots.\n\n Raises\n ------\n ValueError\n If both `density_channels` and `hist_channels` are None.\n\n \"\"\"\n # Check number of plots\n if density_channels is None and hist_channels is None:\n raise ValueError(\"density_channels and hist_channels cannot be both \"\n \"None\")\n\n # Change hist_channels to iterable if necessary\n if not hasattr(hist_channels, \"__iter__\"):\n hist_channels = [hist_channels]\n if isinstance(hist_params, dict):\n hist_params = [hist_params]*len(hist_channels)\n\n plot_density = not(density_channels is None)\n n_plots = plot_density + len(hist_channels)\n\n # Calculate plot size if necessary\n if figsize is None:\n height = 0.315 + 2.935*n_plots\n figsize = (6, height)\n\n # Create plot\n plt.figure(figsize=figsize)\n\n # Density plot\n if plot_density:\n plt.subplot(n_plots, 1, 1)\n # Plot density diagram\n density2d(data, channels=density_channels, **density_params)\n # Plot gate contour\n if gate_contour is not None:\n for g in gate_contour:\n plt.plot(g[:,0], g[:,1], color='k', linewidth=1.25)\n # Add title\n if 'title' not in density_params:\n if gated_data is not None:\n ret = gated_data.shape[0] * 100. 
/ data.shape[0]\n title = \"{} ({:.1f}% retained)\".format(str(data), ret)\n else:\n title = str(data)\n plt.title(title)\n\n # Colors\n n_colors = n_plots - 1\n colors = [cmap_default(i) for i in np.linspace(0, 1, n_colors)]\n # Histogram\n for i, hist_channel in enumerate(hist_channels):\n # Define subplot\n plt.subplot(n_plots, 1, plot_density + i + 1)\n # Default colors\n hist_params_i = hist_params[i].copy()\n if 'facecolor' not in hist_params_i:\n hist_params_i['facecolor'] = colors[i]\n # Plots\n if gated_data is not None:\n hist1d(data,\n channel=hist_channel,\n alpha=0.5,\n **hist_params_i)\n hist1d(gated_data,\n channel=hist_channel,\n alpha=1.0,\n **hist_params_i)\n plt.legend(['Ungated', 'Gated'], loc='best', fontsize='medium')\n else:\n hist1d(data, channel=hist_channel, **hist_params_i)\n \n # Save if necessary\n if savefig is not None:\n plt.tight_layout()\n plt.savefig(savefig, dpi=savefig_dpi)\n plt.close()"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef scatter3d_and_projections(data_list,\n channels=[0,1,2],\n xscale='logicle',\n yscale='logicle',\n zscale='logicle',\n xlabel=None,\n ylabel=None,\n zlabel=None,\n xlim=None,\n ylim=None,\n zlim=None,\n color=None,\n figsize=None,\n savefig=None,\n **kwargs):\n \"\"\"\n Plot a 3D scatter plot and 2D projections from FCSData objects.\n\n `scatter3d_and_projections` creates a 3D scatter plot and three 2D\n projected scatter plots in four different axes for each FCSData object\n in `data_list`, in the same figure.\n\n Parameters\n ----------\n data_list : FCSData object, or list of FCSData objects\n Flow cytometry data to plot.\n channels : list of int, list of str\n Three channels to use for the plot.\n savefig : str, optional\n The name of the file to save the figure to. 
If None, do not save.\n\n Other parameters\n ----------------\n xscale : str, optional\n Scale of the x axis, either ``linear``, ``log``, or ``logicle``.\n yscale : str, optional\n Scale of the y axis, either ``linear``, ``log``, or ``logicle``.\n zscale : str, optional\n Scale of the z axis, either ``linear``, ``log``, or ``logicle``.\n xlabel : str, optional\n Label to use on the x axis. If None, attempts to extract channel\n name from last data object.\n ylabel : str, optional\n Label to use on the y axis. If None, attempts to extract channel\n name from last data object.\n zlabel : str, optional\n Label to use on the z axis. If None, attempts to extract channel\n name from last data object.\n xlim : tuple, optional\n Limits for the x axis. If None, attempts to extract limits from the\n range of the last data object.\n ylim : tuple, optional\n Limits for the y axis. If None, attempts to extract limits from the\n range of the last data object.\n zlim : tuple, optional\n Limits for the z axis. If None, attempts to extract limits from the\n range of the last data object.\n color : matplotlib color or list of matplotlib colors, optional\n Color for the scatter plot. It can be a list with the same length\n as `data_list`. If `color` is not specified, elements from\n `data_list` are plotted with colors taken from the module-level\n variable `cmap_default`.\n figsize : tuple, optional\n Figure size. If None, use matplotlib's default.\n kwargs : dict, optional\n Additional parameters passed directly to matploblib's ``scatter``.\n\n Notes\n -----\n `scatter3d_and_projections` uses matplotlib's ``scatter``, with the 3D\n scatter plot using a 3D projection. 
Additional keyword arguments\n provided to `scatter3d_and_projections` are passed directly to\n ``scatter``.\n\n \"\"\"\n # Check appropriate number of channels\n if len(channels) != 3:\n raise ValueError('three channels need to be specified')\n\n # Create figure\n plt.figure(figsize=figsize)\n\n # Axis 1: channel 0 vs channel 2\n plt.subplot(221)\n scatter2d(data_list,\n channels=[channels[0], channels[2]],\n xscale=xscale,\n yscale=zscale,\n xlabel=xlabel,\n ylabel=zlabel,\n xlim=xlim,\n ylim=zlim,\n color=color,\n **kwargs)\n\n # Axis 2: 3d plot\n ax_3d = plt.gcf().add_subplot(222, projection='3d')\n scatter3d(data_list,\n channels=channels,\n xscale=xscale,\n yscale=yscale,\n zscale=zscale,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n xlim=xlim,\n ylim=ylim,\n zlim=zlim,\n color=color,\n **kwargs)\n\n # Axis 3: channel 0 vs channel 1\n plt.subplot(223)\n scatter2d(data_list,\n channels=[channels[0], channels[1]],\n xscale=xscale,\n yscale=yscale,\n xlabel=xlabel,\n ylabel=ylabel,\n xlim=xlim,\n ylim=ylim,\n color=color,\n **kwargs)\n\n # Axis 4: channel 2 vs channel 1\n plt.subplot(224)\n scatter2d(data_list,\n channels=[channels[2], channels[1]],\n xscale=zscale,\n yscale=yscale,\n xlabel=zlabel,\n ylabel=ylabel,\n xlim=zlim,\n ylim=ylim,\n color=color,\n **kwargs)\n\n # Save if necessary\n if savefig is not None:\n plt.tight_layout()\n plt.savefig(savefig, dpi=savefig_dpi)\n plt.close()", "response": "This function creates a 3D scatter plot and 2D projections from FCSData objects."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef transform_non_affine(self, x, mask_out_of_range=True):\n # Mask out-of-range values\n if mask_out_of_range:\n x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax),\n x)\n else:\n x_masked = x\n # Calculate s and return\n return np.interp(x_masked, self._x_range, self._s_range)", "response": "Transform a Nx1 numpy array."} {"SOURCE": "codesearchnet", "instruction": 
"Implement a function in Python 3 to\napply transformation to a Nx1 numpy array.", "response": "def transform_non_affine(self, s):\n \"\"\"\n Apply transformation to a Nx1 numpy array.\n\n Parameters\n ----------\n s : array\n Data to be transformed in display scale units.\n\n Return\n ------\n array or masked array\n Transformed data, in data value units.\n\n \"\"\"\n T = self._T\n M = self._M\n W = self._W\n p = self._p\n # Calculate x\n return T * 10**(-(M-W)) * (10**(s-W) - (p**2)*10**(-(s-W)/p) + p**2 - 1)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nsetting parameters within this locator.", "response": "def set_params(self, subs=None, numticks=None):\n \"\"\"\n Set parameters within this locator.\n\n Parameters\n ----------\n subs : array, optional\n Subtick values, as multiples of the main ticks.\n numticks : array, optional\n Number of ticks.\n\n \"\"\"\n if numticks is not None:\n self.numticks = numticks\n if subs is not None:\n self._subs = subs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a set of tick values for a given logicle level.", "response": "def tick_values(self, vmin, vmax):\n \"\"\"\n Get a set of tick values properly spaced for logicle axis.\n\n \"\"\"\n # Extract base from transform object\n b = self._transform.base\n # The logicle domain is divided into two regions: A \"linear\" region,\n # which may include negative numbers, and a \"logarithmic\" region, which\n # only includes positive numbers. These two regions are separated by a\n # value t, given by the logicle equations. An illustration is given\n # below.\n #\n # -t ==0== t ========>\n # lin log\n #\n # vmin and vmax can be anywhere in this domain, meaning that both should\n # be greater than -t.\n #\n # The logarithmic region will only have major ticks at integral log\n # positions. 
The linear region will have a major tick at zero, and one\n # major tick at the largest absolute integral log value in screen\n # inside this region. Subticks will be added at multiples of the\n # integral log positions.\n\n # If the linear range is too small, create new transformation object\n # with slightly wider linear range. Otherwise, the number of decades\n # below will be infinite\n if self._transform.W == 0 or \\\n self._transform.M / self._transform.W > self.numticks:\n self._transform = _LogicleTransform(\n T=self._transform.T,\n M=self._transform.M,\n W=self._transform.M / self.numticks)\n # Calculate t\n t = - self._transform.transform_non_affine(0)\n\n # Swap vmin and vmax if necessary\n if vmax < vmin:\n vmin, vmax = vmax, vmin\n # Calculate minimum and maximum limits in scale units\n vmins = self._transform.inverted().transform_non_affine(vmin)\n vmaxs = self._transform.inverted().transform_non_affine(vmax)\n\n # Check whether linear or log regions are present\n has_linear = has_log = False\n if vmin <= t:\n has_linear = True\n if vmax > t:\n has_log = True\n else:\n has_log = True\n\n # Calculate number of ticks in linear and log regions\n # The number of ticks is distributed by the fraction that each region\n # occupies in scale units\n if has_linear:\n fraction_linear = (min(vmaxs, 2*self._transform.W) - vmins) / \\\n (vmaxs - vmins)\n numticks_linear = np.round(self.numticks*fraction_linear)\n else:\n numticks_linear = 0\n if has_log:\n fraction_log = (vmaxs - max(vmins, 2*self._transform.W)) / \\\n (vmaxs - vmins)\n numticks_log = np.round(self.numticks*fraction_log)\n else:\n numticks_log = 0\n\n # Calculate extended ranges and step size for tick location\n # Extended ranges take into account discretization.\n if has_log:\n # The logarithmic region's range will include from the decade\n # immediately below the lower end of the region to the decade\n # immediately above the upper end.\n # Note that this may extend the logarithmic region to the 
left.\n log_ext_range = [np.floor(np.log(max(vmin, t)) / np.log(b)),\n np.ceil(np.log(vmax) / np.log(b))]\n # Since a major tick will be located at the lower end of the\n # extended range, make sure that it is not too close to zero.\n if vmin <= 0:\n zero_s = self._transform.inverted().\\\n transform_non_affine(0)\n min_tick_space = 1./self.numticks\n while True:\n min_tick_s = self._transform.inverted().\\\n transform_non_affine(b**log_ext_range[0])\n if (min_tick_s - zero_s)/(vmaxs - vmins) < min_tick_space \\\n and ((log_ext_range[0] + 1) < log_ext_range[1]):\n log_ext_range[0] += 1\n else:\n break\n # Number of decades in the extended region\n log_decades = log_ext_range[1] - log_ext_range[0]\n # The step is at least one decade.\n if numticks_log > 1:\n log_step = max(np.floor(float(log_decades)/(numticks_log-1)), 1)\n else:\n log_step = 1\n else:\n # Linear region only\n linear_range = [vmin, vmax]\n # Initial step size will be one decade below the maximum whole\n # decade in the range\n linear_step = matplotlib.ticker.decade_down(\n linear_range[1] - linear_range[0], b) / b\n # Reduce the step size according to specified number of ticks\n while (linear_range[1] - linear_range[0])/linear_step > \\\n numticks_linear:\n linear_step *= b\n # Get extended range by discretizing the region limits\n vmin_ext = np.floor(linear_range[0]/linear_step)*linear_step\n vmax_ext = np.ceil(linear_range[1]/linear_step)*linear_step\n linear_range_ext = [vmin_ext, vmax_ext]\n\n # Calculate major tick positions\n major_ticklocs = []\n if has_log:\n # Logarithmic region present\n # If a linear region is present, add the negative of the lower limit\n # of the extended log region and zero. 
Then, add ticks for each\n # logarithmic step as calculated above.\n if has_linear:\n major_ticklocs.append(- b**log_ext_range[0])\n major_ticklocs.append(0)\n # Use nextafter to pick the next floating point number, and try to\n # include the upper limit in the generated range.\n major_ticklocs.extend(b ** (np.arange(\n log_ext_range[0],\n np.nextafter(log_ext_range[1], np.inf),\n log_step)))\n else:\n # Only linear region present\n # Draw ticks according to linear step calculated above.\n # Use nextafter to pick the next floating point number, and try to\n # include the upper limit in the generated range.\n major_ticklocs.extend(np.arange(\n linear_range_ext[0],\n np.nextafter(linear_range_ext[1], np.inf),\n linear_step))\n major_ticklocs = np.array(major_ticklocs)\n\n # Add subticks if requested\n subs = self._subs\n if (subs is not None) and (len(subs) > 1 or subs[0] != 1.0):\n ticklocs = []\n if has_log:\n # Subticks for each major tickloc present\n for major_tickloc in major_ticklocs:\n ticklocs.extend(subs * major_tickloc)\n # Subticks from one decade below the lowest\n major_ticklocs_pos = major_ticklocs[major_ticklocs > 0]\n if len(major_ticklocs_pos):\n tickloc_next_low = np.min(major_ticklocs_pos)/b\n ticklocs.append(tickloc_next_low)\n ticklocs.extend(subs * tickloc_next_low)\n # Subticks for the negative linear range\n if vmin < 0:\n ticklocs.extend([(-ti) for ti in ticklocs if ti < -vmin ])\n else:\n ticklocs = list(major_ticklocs)\n # If zero is present, add ticks from a decade below the lowest\n if (vmin < 0) and (vmax > 0):\n major_ticklocs_nonzero = major_ticklocs[\n np.nonzero(major_ticklocs)]\n tickloc_next_low = np.min(np.abs(major_ticklocs_nonzero))/b\n ticklocs.append(tickloc_next_low)\n ticklocs.extend(subs * tickloc_next_low)\n ticklocs.append(-tickloc_next_low)\n ticklocs.extend(subs * - tickloc_next_low)\n\n else:\n # Subticks not requested\n ticklocs = major_ticklocs\n\n return self.raise_if_exceeds(np.array(ticklocs))"} {"SOURCE": 
"codesearchnet", "instruction": "Write a Python 3 function that can\ntry to choose the view limits intelligently.", "response": "def view_limits(self, vmin, vmax):\n \"\"\"\n Try to choose the view limits intelligently.\n\n \"\"\"\n b = self._transform.base\n if vmax < vmin:\n vmin, vmax = vmax, vmin\n\n if not matplotlib.ticker.is_decade(abs(vmin), b):\n if vmin < 0:\n vmin = -matplotlib.ticker.decade_up(-vmin, b)\n else:\n vmin = matplotlib.ticker.decade_down(vmin, b)\n if not matplotlib.ticker.is_decade(abs(vmax), b):\n if vmax < 0:\n vmax = -matplotlib.ticker.decade_down(-vmax, b)\n else:\n vmax = matplotlib.ticker.decade_up(vmax, b)\n\n if vmin == vmax:\n if vmin < 0:\n vmin = -matplotlib.ticker.decade_up(-vmin, b)\n vmax = -matplotlib.ticker.decade_down(-vmax, b)\n else:\n vmin = matplotlib.ticker.decade_down(vmin, b)\n vmax = matplotlib.ticker.decade_up(vmax, b)\n result = matplotlib.transforms.nonsingular(vmin, vmax)\n return result"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_transform(self):\n return _InterpolatedInverseTransform(transform=self._transform,\n smin=0,\n smax=self._transform._M)", "response": "Get a new object to perform the scaling transformation."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nsetting up the locators and formatters for the scale.", "response": "def set_default_locators_and_formatters(self, axis):\n \"\"\"\n Set up the locators and formatters for the scale.\n\n Parameters\n ----------\n axis: matplotlib.axis\n Axis for which to set locators and formatters.\n\n \"\"\"\n axis.set_major_locator(_LogicleLocator(self._transform))\n axis.set_minor_locator(_LogicleLocator(self._transform,\n subs=np.arange(2.0, 10.)))\n axis.set_major_formatter(matplotlib.ticker.LogFormatterSciNotation(\n labelOnlyBase=True))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn minimum and maximum bounds for the 
logicle axis.", "response": "def limit_range_for_scale(self, vmin, vmax, minpos):\n \"\"\"\n Return minimum and maximum bounds for the logicle axis.\n\n Parameters\n ----------\n vmin : float\n Minimum data value.\n vmax : float\n Maximum data value.\n minpos : float\n Minimum positive value in the data. Ignored by this function.\n\n Return\n ------\n float\n Minimum axis bound.\n float\n Maximum axis bound.\n\n \"\"\"\n vmin_bound = self._transform.transform_non_affine(0)\n vmax_bound = self._transform.transform_non_affine(self._transform.M)\n vmin = max(vmin, vmin_bound)\n vmax = min(vmax, vmax_bound)\n return vmin, vmax"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncalculate the mean of the events in a NxD flow cytometry data.", "response": "def mean(data, channels=None):\n \"\"\"\n Calculate the mean of the events in an FCSData object.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : int or str or list of int or list of str, optional\n Channels on which to calculate the statistic. 
If None, use all\n channels.\n\n Returns\n -------\n float or numpy array\n The mean of the events in the specified channels of `data`.\n\n \"\"\"\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return np.mean(data_stats, axis=0)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef gmean(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return scipy.stats.gmean(data_stats, axis=0)", "response": "Calculates the geometric mean of the events in a flow cytometry data object."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef median(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return np.median(data_stats, axis=0)", "response": "Calculate the median of the events in a flow cytometry data."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef mode(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n # scipy.stats.mode returns two outputs, the first of which is an array\n # containing the modal values. This array has the same number of\n # dimensions as the input, and with only one element in the first\n # dimension. 
We extract this fist element to make it match the other\n # functions in this module.\n return scipy.stats.mode(data_stats, axis=0)[0][0]", "response": "Calculate the mode of the events in a NxD flow cytometry data object."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef std(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return np.std(data_stats, axis=0)", "response": "Calculate the standard deviation of the events in a NxD flow cytometry data."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncalculates the Coeff. of Variation of the events in a flow cytometry dataset.", "response": "def cv(data, channels=None):\n \"\"\"\n Calculate the Coeff. of Variation of the events in an FCSData object.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : int or str or list of int or list of str, optional\n Channels on which to calculate the statistic. 
If None, use all\n channels.\n\n Returns\n -------\n float or numpy array\n The Coefficient of Variation of the events in the specified\n channels of `data`.\n\n Notes\n -----\n The Coefficient of Variation (CV) of a dataset is defined as the\n standard deviation divided by the mean of such dataset.\n\n \"\"\"\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return np.std(data_stats, axis=0) / np.mean(data_stats, axis=0)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef gstd(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return np.exp(np.std(np.log(data_stats), axis=0))", "response": "Calculates the geometric std. dev of the events in a flow cytometry data object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef gcv(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n return np.sqrt(np.exp(np.std(np.log(data_stats), axis=0)**2) - 1)", "response": "Calculate the geometric CV of the events in a NxD flow cytometry data object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef iqr(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n q75, q25 = np.percentile(data_stats, [75 ,25], axis=0)\n return q75 - q25", "response": "Calculate the Interquartile Range of the events in a flow cytometry dataset."} {"SOURCE": "codesearchnet", "instruction": "How would you explain 
what the following Python 3 function does\ndef rcv(data, channels=None):\n # Slice data to take statistics from\n if channels is None:\n data_stats = data\n else:\n data_stats = data[:, channels]\n\n # Calculate and return statistic\n q75, q25 = np.percentile(data_stats, [75 ,25], axis=0)\n return (q75 - q25)/np.median(data_stats, axis=0)", "response": "Calculates the RCV of the events in a flow cytometry data object."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef dados_qrcode(cfe):\n infCFe = cfe.getroot().find('./infCFe')\n cnpjcpf_consumidor = infCFe.findtext('dest/CNPJ') or \\\n infCFe.findtext('dest/CPF') or ''\n return '|'.join([\n infCFe.attrib['Id'][3:], # remove prefixo \"CFe\"\n '{}{}'.format(\n infCFe.findtext('ide/dEmi'),\n infCFe.findtext('ide/hEmi')),\n infCFe.findtext('total/vCFe'),\n cnpjcpf_consumidor,\n infCFe.findtext('ide/assinaturaQRCODE'),])", "response": "Compila os dados que compila o QRCode do CF - e - SAT."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef partes(self, num_partes=11):\n assert 44 % num_partes == 0, 'O numero de partes nao produz um '\\\n 'resultado inteiro (partes por 44 digitos): '\\\n 'num_partes=%s' % num_partes\n\n salto = 44 // num_partes\n return [self._campos[n:(n + salto)] for n in range(0, 44, salto)]", "response": "Return a lista de strings contendo a chave do CF - e - SAT."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef validar_casas_decimais(valor, minimo=1, maximo=2):\n atributos = valor.as_tuple()\n if not (minimo <= abs(atributos.exponent) <= maximo):\n raise ValueError('Numero de casas decimais fora dos limites esperados '\n '(valor={!r}, minimo={!r}, maximo={!r}): {!r}'.format(\n valor, minimo, maximo, atributos))", "response": "Valida o n\u00famero de casas decimais."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a 
brief explanation for the following Python 3 code\ndef uf_pelo_codigo(codigo_ibge):\n idx = [i for s, i, n, r in UNIDADES_FEDERACAO].index(codigo_ibge)\n return UNIDADES_FEDERACAO[idx][_UF_SIGLA]", "response": "Retorna a UF para o c\u00f3digo do IBGE informado."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef codigo_ibge_uf(sigla):\n idx = [s for s, i, n, r in UNIDADES_FEDERACAO].index(sigla)\n return UNIDADES_FEDERACAO[idx][_UF_CODIGO_IBGE]", "response": "Retorna o c\u00f3digo do IBGE para a UF informada."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef cnpj(numero):\n _digitos = [int(c) for c in numero if c.isdigit()]\n\n if len(_digitos) != 14 or len(numero) != 14:\n raise NumeroCNPJError('Nao possui 14 digitos: {!r}'.format(numero))\n\n if numero == numero[0] * 14:\n raise NumeroCNPJError('Todos os digitos iguais: {!r}'.format(numero))\n\n multiplicadores = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]\n\n soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in range(12)])\n soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(13)])\n digito1 = 11 - (soma1 % 11)\n digito2 = 11 - (soma2 % 11)\n\n if digito1 >= 10:\n digito1 = 0\n\n if digito2 >= 10:\n digito2 = 0\n\n if _digitos[12] != digito1 or _digitos[13] != digito2:\n raise NumeroCNPJError('Digitos verificadores invalidos: {!r}'.format(numero))", "response": "Valida um n\u00famero de CNPJ."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef cpf(numero):\n _digitos = [int(c) for c in numero if c.isdigit()]\n\n if len(_digitos) != 11 or len(numero) != 11:\n raise NumeroCPFError('Nao possui 11 digitos: {!r}'.format(numero))\n\n if numero == numero[0] * 11:\n raise NumeroCPFError('Todos os digitos iguais: {!r}'.format(numero))\n\n multiplicadores = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]\n\n soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in 
range(9)])\n soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(10)])\n digito1 = 11 - (soma1 % 11)\n digito2 = 11 - (soma2 % 11)\n\n if digito1 >= 10:\n digito1 = 0\n\n if digito2 >= 10:\n digito2 = 0\n\n if _digitos[9] != digito1 or _digitos[10] != digito2:\n raise NumeroCPFError('Digitos verificadores invalidos: {!r}'.format(numero))", "response": "Valida um n\u00famero de CPF."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef is_cpf(numero, estrito=False):\n try:\n cpf(digitos(numero) if not estrito else numero)\n return True\n except NumeroCPFError:\n pass\n return False", "response": "A funziona se puede um numero CPF."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef as_cpf(numero):\n _num = digitos(numero)\n if is_cpf(_num):\n return '{}.{}.{}-{}'.format(_num[:3], _num[3:6], _num[6:9], _num[9:])\n return numero", "response": "Formata um n\u00famero de CPF retorna o argumento sem qualquer modifica\u00e7\u00e3o."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef is_cnpjcpf(numero, estrito=False):\n _numero = digitos(numero) if not estrito else numero\n try:\n cnpj(_numero)\n return True\n except NumeroCNPJError:\n try:\n cpf(_numero)\n return True\n except NumeroCPFError:\n pass\n return False", "response": "A funziona o numero de verifica se puede um CNPJ CPF."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef as_cnpjcpf(numero):\n if is_cnpj(numero):\n return as_cnpj(numero)\n elif is_cpf(numero):\n return as_cpf(numero)\n return numero", "response": "Formata um n\u00famero de CNPJ ou CPF."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef cep(numero):\n _digitos = digitos(numero)\n\n if len(_digitos) != 8 or len(numero) != 8:\n raise 
NumeroCEPError('CEP \"%s\" nao possui 8 digitos' % numero)\n\n elif _digitos[0] * 8 == _digitos:\n raise NumeroCEPError('CEP \"%s\" considerado invalido' % numero)", "response": "Valida um n\u00famero de CEP."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef is_cep(numero, estrito=False):\n try:\n cep(digitos(numero) if not estrito else numero)\n return True\n except NumeroCEPError:\n pass\n return False", "response": "A funziona se puede um numero de verifica. Se verdadeiro para usar em testes condicionais."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef as_cep(numero):\n _numero = digitos(numero)\n if is_cep(_numero):\n return '{}-{}'.format(_numero[:5], _numero[5:])\n return numero", "response": "Formata um n\u00famero de CEP para um numero."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef convert_tree(message, config, indent=0, wrap_alternative=True, charset=None):\n ct = message.get_content_type()\n cs = message.get_content_subtype()\n if charset is None:\n charset = get_charset_from_message_fragment(message)\n if not message.is_multipart():\n # we're on a leaf\n converted = None\n disposition = message.get('Content-Disposition', 'inline')\n if disposition == 'inline' and ct in ('text/plain', 'text/markdown'):\n converted = convert_one(message, config, charset)\n if converted is not None:\n if wrap_alternative:\n new_tree = MIMEMultipart('alternative')\n _move_headers(message, new_tree)\n new_tree.attach(message)\n new_tree.attach(converted)\n return new_tree, True\n else:\n return converted, True\n return message, False\n else:\n if ct == 'multipart/signed':\n # if this is a multipart/signed message, then let's just\n # recurse into the non-signature part\n new_root = MIMEMultipart('alternative')\n if message.preamble:\n new_root.preamble = message.preamble\n 
_move_headers(message, new_root)\n converted = None\n for part in message.get_payload():\n if part.get_content_type() != 'application/pgp-signature':\n converted, did_conversion = convert_tree(part, config, indent=indent + 1,\n wrap_alternative=False,\n charset=charset)\n if did_conversion:\n new_root.attach(converted)\n new_root.attach(message)\n return new_root, did_conversion\n else:\n did_conversion = False\n new_root = MIMEMultipart(cs, message.get_charset())\n if message.preamble:\n new_root.preamble = message.preamble\n _move_headers(message, new_root)\n for part in message.get_payload():\n part, did_this_conversion = convert_tree(part, config, indent=indent + 1, charset=charset)\n did_conversion |= did_this_conversion\n new_root.attach(part)\n return new_root, did_conversion", "response": "Recursively convert a potentially - multipart message into a new MIMEMultipart."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef smtp_connection(c):\n if c.smtp_ssl:\n klass = smtplib.SMTP_SSL\n else:\n klass = smtplib.SMTP\n conn = klass(c.smtp_host, c.smtp_port, timeout=c.smtp_timeout)\n if not c.smtp_ssl:\n conn.ehlo()\n conn.starttls()\n conn.ehlo()\n if c.smtp_username:\n conn.login(c.smtp_username, c.smtp_password)\n return conn", "response": "Create an SMTP connection from a Config object"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the list of available issuers for this payment method.", "response": "def issuers(self):\n \"\"\"Return the list of available issuers for this payment method.\"\"\"\n issuers = self._get_property('issuers') or []\n result = {\n '_embedded': {\n 'issuers': issuers,\n },\n 'count': len(issuers),\n }\n return List(result, Issuer)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncancel a payment and return the updated payment object.", "response": "def delete(self, payment_id, data=None):\n 
\"\"\"Cancel payment and return the payment object.\n\n Deleting a payment causes the payment status to change to canceled.\n The updated payment object is returned.\n \"\"\"\n if not payment_id or not payment_id.startswith(self.RESOURCE_ID_PREFIX):\n raise IdentifierError(\n \"Invalid payment ID: '{id}'. A payment ID should start with '{prefix}'.\".format(\n id=payment_id, prefix=self.RESOURCE_ID_PREFIX)\n )\n result = super(Payments, self).delete(payment_id, data)\n return self.get_resource_object(result)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the mandate for this payment.", "response": "def mandate(self):\n \"\"\"Return the mandate for this payment.\"\"\"\n return self.client.customer_mandates.with_parent_id(self.customer_id).get(self.mandate_id)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef subscription(self):\n return self.client.customer_subscriptions.with_parent_id(self.customer_id).get(self.subscription_id)", "response": "Return the subscription for this payment."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the order for this payment.", "response": "def order(self):\n \"\"\"Return the order for this payment. 
\"\"\"\n from ..resources.orders import Order\n url = self._get_link('order')\n if url:\n resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)\n return Order(resp, self.client)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nread the version from a file in the repository.", "response": "def get_version():\n \"\"\"\n Read the version from a file (mollie/api/version.py) in the repository.\n\n We can't import here since we might import from an installed version.\n \"\"\"\n try:\n version_file = open(os.path.join(ROOT_DIR, 'mollie', 'api', 'version.py'), encoding='utf=8')\n except TypeError:\n # support python 2\n version_file = open(os.path.join(ROOT_DIR, 'mollie', 'api', 'version.py'))\n contents = version_file.read()\n match = re.search(r'VERSION = [\\'\"]([^\\'\"]+)', contents)\n if match:\n return match.group(1)\n else:\n raise RuntimeError(\"Can't determine package version\")"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncreate a refund for the order. When no data arg is given a refund for all order lines is assumed.", "response": "def create_refund(self, data=None, **params):\n \"\"\"Create a refund for the order. When no data arg is given, a refund for all order lines is assumed.\"\"\"\n if data is None:\n data = {'lines': []}\n refund = OrderRefunds(self.client).on(self).create(data, **params)\n return refund"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncancel the lines given.", "response": "def cancel_lines(self, data=None):\n \"\"\"Cancel the lines given. 
When no lines are given, cancel all the lines.\n\n Canceling an order line causes the order line status to change to canceled.\n An empty dictionary will be returned.\n \"\"\"\n from ..resources.order_lines import OrderLines\n\n if data is None:\n data = {'lines': []}\n canceled = OrderLines(self.client).on(self).delete(data)\n return canceled"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef update_line(self, resource_id, data):\n return OrderLines(self.client).on(self).update(resource_id, data)", "response": "Update a line for an order."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating a shipment for an order. When no data arg is given a shipment for all lines is assumed.", "response": "def create_shipment(self, data=None):\n \"\"\"Create a shipment for an order. When no data arg is given, a shipment for all order lines is assumed.\"\"\"\n if data is None:\n data = {'lines': []}\n return Shipments(self.client).on(self).create(data)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_shipment(self, resource_id):\n return Shipments(self.client).on(self).get(resource_id)", "response": "Retrieve a single shipment by a shipment s ID."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef update_shipment(self, resource_id, data):\n return Shipments(self.client).on(self).update(resource_id, data)", "response": "Update the tracking information of a shipment."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_payment(self, data):\n return OrderPayments(self.client).on(self).create(data)", "response": "Creates a new payment object for an order."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nupdating the orderline with the given id.", "response": "def update(self, 
resource_id, data=None, **params):\n \"\"\"\n Custom handling for updating orderlines.\n\n The API returns an Order object. Since we are sending the request through an orderline object, it makes more\n sense to convert the returned object to to the updated orderline object.\n\n If you wish to retrieve the order object, you can do so by using the order_id property of the orderline.\n \"\"\"\n path = self.get_resource_name() + '/' + str(resource_id)\n result = self.perform_api_call(self.REST_UPDATE, path, data=data)\n\n for line in result['lines']:\n if line['id'] == resource_id:\n return self.get_resource_object(line)\n raise DataConsistencyError('Line id {resource_id} not found in response.'.format(resource_id=resource_id))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_next(self):\n url = self._get_link('next')\n resource = self.object_type.get_resource_class(self.client)\n resp = resource.perform_api_call(resource.REST_READ, url)\n return List(resp, self.object_type, self.client)", "response": "Return the next set of objects in a list"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncancels a subscription and return the subscription object.", "response": "def delete(self, subscription_id, data=None):\n \"\"\"Cancel subscription and return the subscription object.\n\n Deleting a subscription causes the subscription status to changed to 'canceled'.\n The updated subscription object is returned.\n \"\"\"\n if not subscription_id or not subscription_id.startswith(self.RESOURCE_ID_PREFIX):\n raise IdentifierError(\n \"Invalid subscription ID: '{id}'. 
A subscription ID should start with '{prefix}'.\".format(\n id=subscription_id, prefix=self.RESOURCE_ID_PREFIX)\n )\n result = super(CustomerSubscriptions, self).delete(subscription_id, data)\n return self.get_resource_object(result)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nverify the chargeback ID and retrieve the chargeback from the API.", "response": "def get(self, chargeback_id, **params):\n \"\"\"Verify the chargeback ID and retrieve the chargeback from the API.\"\"\"\n if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX):\n raise IdentifierError(\n \"Invalid chargeback ID: '{id}'. A chargeback ID should start with '{prefix}'.\".format(\n id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX)\n )\n return super(Chargebacks, self).get(chargeback_id, **params)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ngenerate a querystring suitable for use in the Molecule API.", "response": "def generate_querystring(params):\n \"\"\"\n Generate a querystring suitable for use in the v2 api.\n\n The Requests library doesn't know how to generate querystrings that encode dictionaries using square brackets:\n https://api.mollie.com/v2/methods?amount[value]=100.00&amount[currency]=USD\n\n Note: we use `sorted()` to work around a difference in iteration behaviour between Python 2 and 3.\n This makes the output predictable, and ordering of querystring parameters shouldn't matter.\n \"\"\"\n if not params:\n return None\n parts = []\n for param, value in sorted(params.items()):\n if not isinstance(value, dict):\n parts.append(urlencode({param: value}))\n else:\n # encode dictionary with square brackets\n for key, sub_value in sorted(value.items()):\n composed = '{param}[{key}]'.format(param=param, key=key)\n parts.append(urlencode({composed: sub_value}))\n if parts:\n return '&'.join(parts)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nadds or replace 
new user - agent component strings.", "response": "def set_user_agent_component(self, key, value, sanitize=True):\n \"\"\"Add or replace new user-agent component strings.\n\n Given strings are formatted along the format agreed upon by Mollie and implementers:\n - key and values are separated by a forward slash (\"/\").\n - multiple key/values are separated by a space.\n - keys are camel-cased, and cannot contain spaces.\n - values cannot contain spaces.\n\n Note: When you set sanitize=false yuu need to make sure the formatting is correct yourself.\n \"\"\"\n if sanitize:\n key = ''.join(_x.capitalize() for _x in re.findall(r'\\S+', key))\n if re.search(r'\\s+', value):\n value = '_'.join(re.findall(r'\\S+', value))\n self.user_agent_components[key] = value"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the formatted user agent string.", "response": "def user_agent(self):\n \"\"\"Return the formatted user agent string.\"\"\"\n components = [\"/\".join(x) for x in self.user_agent_components.items()]\n return \" \".join(components)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef factory(resp):\n status = resp['status']\n if status == 401:\n return UnauthorizedError(resp)\n elif status == 404:\n return NotFoundError(resp)\n elif status == 422:\n return UnprocessableEntityError(resp)\n else:\n # generic fallback\n return ResponseError(resp)", "response": "Return a ResponseError subclass based on the API payload."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef delete(self, order_id, data=None):\n if not order_id or not order_id.startswith(self.RESOURCE_ID_PREFIX):\n raise IdentifierError(\n \"Invalid order ID: '{id}'. 
An order ID should start with '{prefix}'.\".format(\n id=order_id, prefix=self.RESOURCE_ID_PREFIX)\n )\n result = super(Orders, self).delete(order_id, data)\n return self.get_resource_object(result)", "response": "Cancel an order and return the order object."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns the customer for this subscription.", "response": "def customer(self):\n \"\"\"Return the customer for this subscription.\"\"\"\n url = self._get_link('customer')\n if url:\n resp = self.client.customers.perform_api_call(self.client.customers.REST_READ, url)\n return Customer(resp)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning a list of payments for this subscription.", "response": "def payments(self):\n \"\"\"Return a list of payments for this subscription.\"\"\"\n payments = self.client.subscription_payments.on(self).list()\n return payments"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngets the groups the logged in user is a member of.", "response": "def list_groups(self, filtr, url_prefix, auth, session, send_opts):\n \"\"\"Get the groups the logged in user is a member of.\n\n Optionally filter by 'member' or 'maintainer'.\n\n Args:\n filtr (string|None): ['member'|'maintainer'] or defaults to None.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (list[string]): List of group names.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n req = self.get_group_request(\n 'GET', 'application/json', url_prefix, auth)\n if filtr is not None:\n if not filtr == 'member' and not filtr == 'maintainer':\n raise RuntimeError(\n 'filtr must be either \"member\", \"maintainer\", or None.')\n 
req.params = {'filter': filtr}\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 200:\n resp_json = resp.json()\n return resp_json['groups']\n\n msg = ('List groups failed, got HTTP response: ({}) - {}'.format(\n resp.status_code, resp.text))\n\n raise HTTPError(msg, request = req, response = resp)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef delete_group(self, name, url_prefix, auth, session, send_opts):\n req = self.get_group_request(\n 'DELETE', 'application/json', url_prefix, auth, name)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 204:\n return\n\n msg = ('Delete failed for group {}, got HTTP response: ({}) - {}'.format(\n name, resp.status_code, resp.text))\n raise HTTPError(msg, request = req, response = resp)", "response": "Delete given group.\n\n Args:\n name (string): Name of group.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Raises:\n requests.HTTPError on failure."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ngets the members of a group.", "response": "def list_group_members(self, name, url_prefix, auth, session, send_opts):\n \"\"\"Get the members of a group (does not include maintainers).\n\n Args:\n name (string): Name of group to query.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (list[string]): List of member names.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n req 
= self.get_group_members_request(\n 'GET', 'application/json', url_prefix, auth, name)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 200:\n resp_json = resp.json()\n return resp_json['members']\n\n msg = ('Failed getting members of group {}, got HTTP response: ({}) - {}'.format(\n name, resp.status_code, resp.text))\n raise HTTPError(msg, request = req, response = resp)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef list_permissions(self, group_name=None, resource=None,\n url_prefix=None, auth=None, session=None, send_opts=None):\n \"\"\"List the permission sets for the logged in user\n\n Optionally filter by resource or group.\n\n Args:\n group_name (string): Name of group to filter on\n resource (intern.resource.boss.BossResource): Identifies which data model object to filter on\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (list[dict]): List of dictionaries of permission sets\n \"\"\"\n filter_params = {}\n if group_name:\n filter_params[\"group\"] = group_name\n\n if resource:\n filter_params.update(resource.get_dict_route())\n\n req = self.get_permission_request('GET', 'application/json',\n url_prefix, auth, query_params=filter_params)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code != 200:\n msg = \"Failed to get permission sets. 
\"\n if group_name:\n msg = \"{} Group: {}\".format(msg, group_name)\n if resource:\n msg = \"{} Resource: {}\".format(msg, resource.name)\n\n msg = '{}, got HTTP response: ({}) - {}'.format(msg, resp.status_code, resp.text)\n\n raise HTTPError(msg, request=req, response=resp)\n else:\n return resp.json()[\"permission-sets\"]", "response": "Returns a list of permission sets for the logged in user."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_permissions(self, group_name, resource, permissions, url_prefix, auth, session, send_opts):\n post_data = {\"group\": group_name,\n \"permissions\": permissions,\n }\n post_data.update(resource.get_dict_route())\n req = self.get_permission_request('POST', 'application/json',\n url_prefix, auth, post_data=post_data)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n\n if resp.status_code != 201:\n msg = ('Failed adding permissions to group {}, got HTTP response: ({}) - {}'.format(group_name,\n resp.status_code,\n resp.text))\n raise HTTPError(msg, request=req, response=resp)", "response": "Adds permissions to a resource in a group."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef delete_permissions(self, grp_name, resource, url_prefix, auth, session, send_opts):\n filter_params = {\"group\": grp_name}\n filter_params.update(resource.get_dict_route())\n\n req = self.get_permission_request('DELETE', 'application/json',\n url_prefix, auth, query_params=filter_params)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 204:\n return\n\n msg = ('Failed deleting permissions to group {}, got HTTP response: ({}) - {}'.format(\n grp_name, resp.status_code, resp.text))\n raise HTTPError(msg, request=req, response=resp)", "response": "Delete permissions for a specific group."} {"SOURCE": "codesearchnet", "instruction": "How would 
you explain what the following Python 3 function does\ndef get_user_roles(self, user, url_prefix, auth, session, send_opts):\n req = self.get_user_role_request(\n 'GET', 'application/json', url_prefix, auth,\n user)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 200:\n return resp.json()\n\n msg = (\n 'Failed getting roles for user: {}, got HTTP response: ({}) - {}'\n .format(user, resp.status_code, resp.text))\n raise HTTPError(msg, request = req, response = resp)", "response": "Get the roles associated with a user."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef add_user(\n self, user, first_name, last_name, email, password,\n url_prefix, auth, session, send_opts):\n \"\"\"Add a new user.\n\n Args:\n user (string): User name.\n first_name (string): User's first name.\n last_name (string): User's last name.\n email: (string): User's email address.\n password: (string): User's password.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n req = self.get_user_request(\n 'POST', 'application/json', url_prefix, auth,\n user, first_name, last_name, email, password)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 201:\n return\n\n msg = (\n 'Failed adding user: {}, got HTTP response: ({}) - {}'\n .format(user, resp.status_code, resp.text))\n raise HTTPError(msg, request = req, response = resp)", "response": "Add a user to the cache."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nlist all resources of the same type as the given resource.", "response": "def list(self, resource, 
url_prefix, auth, session, send_opts):\n \"\"\"List all resources of the same type as the given resource.\n\n Args:\n resource (intern.resource.boss.BossResource): List resources of the same type as this..\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (list): List of resources. Each resource is a dictionary.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n req = self.get_request(\n resource, 'GET', 'application/json', url_prefix, auth,\n proj_list_req=True)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 200:\n return self._get_resource_list(resp.json())\n\n err = ('List failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(err, request = req, response = resp)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nupdating an entity in the data model using the given resource.", "response": "def update(self, resource_name, resource, url_prefix, auth, session, send_opts):\n \"\"\"Updates an entity in the data model using the given resource.\n\n Args:\n resource_name (string): Current name of the resource (in case the resource is getting its name changed).\n resource (intern.resource.boss.BossResource): New attributes for the resource.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (intern.resource.boss.BossResource): Returns updated resource of given type on success.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n\n # Create a copy of the resource and change 
its name to resource_name\n # in case the update includes changing the name of a resource.\n old_resource = copy.deepcopy(resource)\n old_resource.name = resource_name\n\n json = self._get_resource_params(resource, for_update=True)\n\n req = self.get_request(old_resource, 'PUT', 'application/json', url_prefix, auth, json=json)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n\n if resp.status_code == 200:\n return self._create_resource_from_dict(resource, resp.json())\n\n err = ('Update failed on {}, got HTTP response: ({}) - {}'.format(\n old_resource.name, resp.status_code, resp.text))\n raise HTTPError(err, request = req, response = resp)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ndelete the entity described by the given resource.", "response": "def delete(self, resource, url_prefix, auth, session, send_opts):\n \"\"\"Deletes the entity described by the given resource.\n\n Args:\n resource (intern.resource.boss.BossResource)\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n req = self.get_request(\n resource, 'DELETE', 'application/json', url_prefix, auth)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 204:\n return\n\n err = ('Delete failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(err, request = req, response = resp)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngets the parameters for a given resource.", "response": "def _get_resource_params(self, resource, for_update=False):\n \"\"\"Get dictionary containing all parameters for the given resource.\n\n When 
getting params for a coordinate frame update, only name and\n description are returned because they are the only fields that can\n be updated.\n\n Args:\n resource (intern.resource.boss.resource.BossResource): A sub-class\n whose parameters will be extracted into a dictionary.\n for_update (bool): True if params will be used for an update.\n\n Returns:\n (dictionary): A dictionary containing the resource's parameters as\n required by the Boss API.\n\n Raises:\n TypeError if resource is not a supported class.\n \"\"\"\n if isinstance(resource, CollectionResource):\n return self._get_collection_params(resource)\n\n if isinstance(resource, ExperimentResource):\n return self._get_experiment_params(resource, for_update)\n\n if isinstance(resource, CoordinateFrameResource):\n return self._get_coordinate_params(resource, for_update)\n\n if isinstance(resource, ChannelResource):\n return self._get_channel_params(resource, for_update)\n\n raise TypeError('resource is not supported type.')"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _create_resource_from_dict(self, resource, dict):\n if isinstance(resource, CollectionResource):\n return self._get_collection(dict)\n\n if isinstance(resource, ExperimentResource):\n return self._get_experiment(dict, resource.coll_name)\n\n if isinstance(resource, CoordinateFrameResource):\n return self._get_coordinate(dict)\n\n if isinstance(resource, ChannelResource):\n return self._get_channel(dict, resource.coll_name, resource.exp_name)\n\n raise TypeError('resource is not supported type.')", "response": "Create an instance of the appropriate class based on the given resource and dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _get_resource_list(self, rsrc_dict):\n if 'collections' in rsrc_dict:\n return rsrc_dict['collections']\n if 'experiments' in rsrc_dict:\n return 
rsrc_dict['experiments']\n if 'channels' in rsrc_dict:\n return rsrc_dict['channels']\n if 'coords' in rsrc_dict:\n return rsrc_dict['coords']\n\n raise RuntimeError('Invalid list response received from Boss. No known resource type returned.')", "response": "Extracts a list of resources from the HTTP response encoded in a dictionary."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_cutout(self, resource, resolution, x_range, y_range, z_range, time_range=None, id_list=[], access_mode=CacheMode.no_cache, **kwargs):\n\n return self.service.get_cutout(\n resource, resolution, x_range, y_range, z_range, time_range, id_list,\n self.url_prefix, self.auth, self.session, self.session_send_opts, access_mode, **kwargs)", "response": "Get a cutout from the volume service."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef reserve_ids(self, resource, num_ids):\n return self.service.reserve_ids(\n resource, num_ids,\n self.url_prefix, self.auth, self.session, self.session_send_opts)", "response": "Reserve a block of unique sequential ids for annotations."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngets the bounding box containing object specified by id.", "response": "def get_bounding_box(self, resource, resolution, id, bb_type='loose'):\n \"\"\"Get bounding box containing object specified by id.\n\n Currently only supports 'loose' bounding boxes. 
The bounding box\n returned is cuboid aligned.\n\n Args:\n resource (intern.resource.Resource): Resource compatible with annotation operations.\n resolution (int): 0 indicates native resolution.\n id (int): Id of object of interest.\n bb_type (optional[string]): Defaults to 'loose'.\n\n Returns:\n (dict): {'x_range': [0, 10], 'y_range': [0, 10], 'z_range': [0, 10], 't_range': [0, 10]}\n \"\"\"\n return self.service.get_bounding_box(\n resource, resolution, id, bb_type,\n self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting all ids in the specified region.", "response": "def get_ids_in_region(\n self, resource, resolution,\n x_range, y_range, z_range, time_range=[0, 1]):\n \"\"\"Get all ids in the region defined by x_range, y_range, z_range.\n\n Args:\n resource (intern.resource.Resource): An annotation channel.\n resolution (int): 0 indicates native resolution.\n x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.\n y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.\n z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.\n time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40. 
Defaults to [0, 1].\n\n Returns:\n (list[int]): Example: [1, 2, 25].\n\n Raises:\n requests.HTTPError\n TypeError: if resource is not an annotation channel.\n \"\"\"\n return self.service.get_ids_in_region(\n resource, resolution, x_range, y_range, z_range, time_range,\n self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):\n return self.service.get_neuroglancer_link(resource, resolution, x_range, y_range, z_range, self.url_prefix, **kwargs)", "response": "Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef list(self, resource, url_prefix, auth, session, send_opts):\n\n req = self.get_metadata_request(\n resource, 'GET', 'application/json', url_prefix, auth)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 200:\n keys_dict = resp.json()\n return keys_dict['keys']\n\n err = ('List failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(err, request = req, response = resp)", "response": "List the metadata keys associated with the given resource."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create(self, resource, keys_vals, url_prefix, auth, session, send_opts):\n success = True\n exc = HTTPErrorList('At least one key-value create failed.')\n\n for pair in keys_vals.items():\n key = pair[0]\n value = pair[1]\n req = self.get_metadata_request(\n resource, 'POST', 'application/json', url_prefix, auth,\n key, value)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 
201:\n continue\n\n err = (\n 'Create failed for {}: {}:{}, got HTTP response: ({}) - {}'\n .format(resource.name, key, value, resp.status_code, resp.text))\n exc.http_errors.append(HTTPError(err, request=req, response=resp))\n success = False\n\n if not success:\n raise exc", "response": "Create the given key - value pairs for the given resource."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_bit_width(self, resource):\n datatype = resource.datatype\n\n if \"uint\" in datatype:\n bit_width = int(datatype.split(\"uint\")[1])\n else:\n raise ValueError(\"Unsupported datatype: {}\".format(datatype))\n\n return bit_width", "response": "Method to return the bit width for blosc based on the Resource"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_cutout(\n self, resource, resolution, x_range, y_range, z_range, time_range, numpyVolume,\n url_prefix, auth, session, send_opts):\n \"\"\"Upload a cutout to the Boss data store.\n\n Args:\n resource (intern.resource.resource.Resource): Resource compatible with cutout operations.\n resolution (int): 0 indicates native resolution.\n x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.\n y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.\n z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.\n time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.\n numpyVolume (numpy.array): A 3D or 4D (time) numpy matrix in (time)ZYX order.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n \"\"\"\n\n if numpyVolume.ndim == 3:\n # Can't have time\n if time_range is not None:\n raise ValueError(\n \"You must 
provide a 4D matrix if specifying a time range\")\n elif numpyVolume.ndim == 4:\n # must have time\n if time_range is None:\n raise ValueError(\n \"You must specifying a time range if providing a 4D matrix\")\n else:\n raise ValueError(\n \"Invalid data format. Only 3D or 4D cutouts are supported. \" +\n \"Number of dimensions: {}\".format(numpyVolume.ndim)\n )\n\n # Check to see if this volume is larger than 1GB. If so, chunk it into\n # several smaller bites:\n if (\n (x_range[1] - x_range[0]) *\n (y_range[1] - y_range[0]) *\n (z_range[1] - z_range[0])\n ) > 1024 * 1024 * 32 * 2:\n blocks = block_compute(\n x_range[0], x_range[1],\n y_range[0], y_range[1],\n z_range[0], z_range[1],\n block_size=(1024, 1024, 32)\n )\n\n for b in blocks:\n _data = np.ascontiguousarray(\n numpyVolume[\n b[2][0] - z_range[0]: b[2][1] - z_range[0],\n b[1][0] - y_range[0]: b[1][1] - y_range[0],\n b[0][0] - x_range[0]: b[0][1] - x_range[0]\n ],\n dtype=numpyVolume.dtype\n )\n self.create_cutout(\n resource, resolution, b[0], b[1], b[2],\n time_range, _data, url_prefix, auth, session, send_opts\n )\n return\n\n compressed = blosc.compress(\n numpyVolume, typesize=self.get_bit_width(resource)\n )\n req = self.get_cutout_request(\n resource, 'POST', 'application/blosc',\n url_prefix, auth,\n resolution, x_range, y_range, z_range, time_range, numpyVolume=compressed)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n\n if resp.status_code == 201:\n return\n\n msg = ('Create cutout failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(msg, request=req, response=resp)", "response": "Creates a cutout for a resource in the Boss data store."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupload a cutout to the Boss data store.", "response": "def get_cutout(\n self, resource, resolution, x_range, y_range, z_range, time_range, id_list,\n url_prefix, auth, session, 
send_opts, access_mode=CacheMode.no_cache, **kwargs\n ):\n \"\"\"\n Upload a cutout to the Boss data store.\n\n Args:\n resource (intern.resource.resource.Resource): Resource compatible\n with cutout operations\n resolution (int): 0 indicates native resolution.\n x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.\n y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.\n z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.\n time_range ([list[int]]|None): time range such as [30, 40] which means t>=30 and t<40.\n id_list (list[int]): list of object ids to filter the cutout by.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n access_mode (optional [Enum]): Identifies one of three cache access options:\n cache = Will check both cache and for dirty keys\n no_cache = Will skip cache check but check for dirty keys\n raw = Will skip both the cache and dirty keys check\n chunk_size (optional Tuple[int, int, int]): The chunk size to request\n\n\n Returns:\n (numpy.array): A 3D or 4D numpy matrix in ZXY(time) order.\n\n Raises:\n requests.HTTPError\n \"\"\"\n chunk_size = kwargs.pop(\"chunk_size\", (512, 512, 16 * 8))\n # TODO: magic number\n chunk_limit = (chunk_size[0] * chunk_size[1] * chunk_size[2]) * 1.2\n\n # Check to see if this volume is larger than a single request. 
If so,\n # chunk it into several smaller bites:\n if time_range:\n cutout_size = (\n (x_range[1] - x_range[0]) *\n (y_range[1] - y_range[0]) *\n (z_range[1] - z_range[0]) *\n (time_range[1] - time_range[0])\n )\n else:\n cutout_size = (\n (x_range[1] - x_range[0]) *\n (y_range[1] - y_range[0]) *\n (z_range[1] - z_range[0])\n )\n\n if cutout_size > chunk_limit:\n blocks = block_compute(\n x_range[0], x_range[1],\n y_range[0], y_range[1],\n z_range[0], z_range[1],\n block_size=chunk_size\n )\n\n result = np.ndarray((\n z_range[1] - z_range[0],\n y_range[1] - y_range[0],\n x_range[1] - x_range[0]\n ), dtype=resource.datatype)\n\n for b in blocks:\n _data = self.get_cutout(\n resource, resolution, b[0], b[1], b[2],\n time_range, id_list, url_prefix, auth, session, send_opts, \n access_mode, **kwargs\n )\n\n result[\n b[2][0] - z_range[0] : b[2][1] - z_range[0],\n b[1][0] - y_range[0] : b[1][1] - y_range[0],\n b[0][0] - x_range[0] : b[0][1] - x_range[0]\n ] = _data\n\n return result\n\n req = self.get_cutout_request(\n resource, 'GET', 'application/blosc',\n url_prefix, auth,\n resolution, x_range, y_range, z_range, time_range, access_mode=access_mode,\n id_list=id_list, **kwargs\n )\n prep = session.prepare_request(req)\n # Hack in Accept header for now.\n prep.headers['Accept'] = 'application/blosc'\n resp = session.send(prep, **send_opts)\n\n if resp.status_code == 200:\n raw_data = blosc.decompress(resp.content)\n data_mat = np.fromstring(raw_data, dtype=resource.datatype)\n\n if time_range:\n # Reshape including time\n return np.reshape(data_mat,\n (time_range[1] - time_range[0],\n z_range[1] - z_range[0],\n y_range[1] - y_range[0],\n x_range[1] - x_range[0]),\n order='C')\n else:\n # Reshape without including time\n return np.reshape(data_mat,\n (z_range[1] - z_range[0],\n y_range[1] - y_range[0],\n x_range[1] - x_range[0]),\n order='C')\n\n msg = ('Get cutout failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n 
raise HTTPError(msg, request=req, response=resp)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreserving a block of unique sequential ids for annotations.", "response": "def reserve_ids(\n self, resource, num_ids,\n url_prefix, auth, session, send_opts):\n \"\"\"Reserve a block of unique, sequential ids for annotations.\n\n Args:\n resource (intern.resource.Resource): Resource should be an annotation channel.\n num_ids (int): Number of ids to reserve.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (int): First id reserved.\n\n Raises:\n (TypeError): resource is not a channel or not an annotation channel.\n\n \"\"\"\n if not isinstance(resource, ChannelResource):\n raise TypeError('resource must be ChannelResource')\n if resource.type != 'annotation':\n raise TypeError('Channel is not an annotation channel')\n\n req = self.get_reserve_request(\n resource, 'GET', 'application/json', url_prefix, auth, num_ids)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n\n if resp.status_code == 200:\n json_data = resp.json()\n return int(json_data['start_id'])\n\n msg = ('Reserve ids failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(msg, request=req, response=resp)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_bounding_box(\n self, resource, resolution, _id, bb_type,\n url_prefix, auth, session, send_opts):\n \"\"\"Get bounding box containing object specified by id.\n\n Currently only supports 'loose' bounding boxes. 
The bounding box\n returned is cuboid aligned.\n\n Args:\n resource (intern.resource.Resource): Resource compatible with annotation operations.\n resolution (int): 0 indicates native resolution.\n _id (int): Id of object of interest.\n bb_type (string): Defaults to 'loose'.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (dict): {'x_range': [0, 10], 'y_range': [0, 10], 'z_range': [0, 10], 't_range': [0, 10]}\n\n Raises:\n requests.HTTPError\n TypeError: if resource is not an annotation channel.\n \"\"\"\n if not isinstance(resource, ChannelResource):\n raise TypeError('resource must be ChannelResource')\n if resource.type != 'annotation':\n raise TypeError('Channel is not an annotation channel')\n\n req = self.get_bounding_box_request(\n resource, 'GET', 'application/json', url_prefix, auth, resolution,\n _id, bb_type)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n\n if resp.status_code == 200:\n json_data = resp.json()\n return json_data\n\n msg = ('Get bounding box failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(msg, request=req, response=resp)", "response": "Get the bounding box containing object specified by id."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_ids_in_region(\n self, resource, resolution, x_range, y_range, z_range, time_range,\n url_prefix, auth, session, send_opts):\n \"\"\"Get all ids in the region defined by x_range, y_range, z_range.\n\n Args:\n resource (intern.resource.Resource): An annotation channel.\n resolution (int): 0 indicates native resolution.\n x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.\n y_range (list[int]): 
y range such as [10, 20] which means y>=10 and y<20.\n z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.\n time_range (list[int]]): time range such as [30, 40] which means t>=30 and t<40.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n auth (string): Token to send in the request header.\n session (requests.Session): HTTP session to use for request.\n send_opts (dictionary): Additional arguments to pass to session.send().\n\n Returns:\n (list[int]): Example: [1, 2, 25].\n\n Raises:\n requests.HTTPError\n TypeError: if resource is not an annotation channel.\n \"\"\"\n if not isinstance(resource, ChannelResource):\n raise TypeError('resource must be ChannelResource')\n if resource.type != 'annotation':\n raise TypeError('Channel is not an annotation channel')\n\n req = self.get_ids_request(\n resource, 'GET', 'application/json', url_prefix, auth,\n resolution, x_range, y_range, z_range, time_range)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n\n if resp.status_code == 200:\n json_data = resp.json()\n id_str_list = json_data['ids']\n id_int_list = [int(i) for i in id_str_list]\n return id_int_list\n\n msg = ('Get bounding box failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(msg, request=req, response=resp)", "response": "Get all the ids in the specified region."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget a neuroglancer link of the cutout specified from the host specified in the remote configuration step.", "response": "def get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, url_prefix, **kwargs):\n \"\"\"\n Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.\n\n Args:\n resource (intern.resource.Resource): Resource compatible with cutout operations.\n resolution (int): 0 indicates native 
resolution.\n x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.\n y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.\n z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.\n url_prefix (string): Protocol + host such as https://api.theboss.io\n\n Returns:\n (string): Return neuroglancer link.\n\n Raises:\n RuntimeError when given invalid resource.\n Other exceptions may be raised depending on the volume service's implementation.\n \"\"\"\n link = \"https://neuroglancer.theboss.io/#!{'layers':{'\" + str(resource.name) + \"':{'type':'\" + resource.type + \"'_'source':\" + \"'boss://\" + url_prefix+ \"/\" + resource.coll_name + \"/\" + resource.exp_name + \"/\" + resource.name + \"'}}_'navigation':{'pose':{'position':{'voxelCoordinates':[\" + str(x_range[0]) + \"_\" + str(y_range[0]) + \"_\" + str(z_range[0]) + \"]}}}}\"\n return link"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_graphviz_dot(machine):\n s = []\n s.append('digraph G {\\n')\n s.append('node [shape=box style=rounded fontname=Helvetica];\\n')\n s.append('edge [ fontname=Helvetica ];\\n')\n # initial state\n s.append('initial [shape=point width=0.2];\\n')\n # final states\n counter = 1\n for t_id in machine._table:\n transition = machine._table[t_id]\n if not transition.internal:\n if transition.target is 'final':\n s.append('f{} [shape=doublecircle width=0.1 label=\"\" style=filled fillcolor=black];\\n'.format(counter))\n counter = counter + 1\n\n for state_name in machine._states:\n s.append(_print_state(machine._states[state_name]))\n # initial transition\n counter = 0\n s.append(_print_transition(machine._intial_transition, counter))\n counter = 1\n for t_id in machine._table:\n transition = machine._table[t_id]\n if not transition.internal:\n s.append(_print_transition(transition, counter))\n counter = counter + 1\n s.append('}')\n return ''.join(s)", "response": "Returns the 
graph of the state machine in Graphviz."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _parse_arg_list(arglist):\n args = []\n for arg in arglist.split(','):\n arg = arg.strip()\n if arg: # string is not empty\n args.append(eval(arg))\n return args", "response": "Parses a list of arguments."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nparse a single action item.", "response": "def _parse_action(action):\n \"\"\"\n Parses a single action item, for instance one of the following:\n\n m; m(); m(True); m(*)\n\n The brackets must match.\n \"\"\"\n i_open = action.find('(')\n if i_open is -1:\n # return action name, finished\n return {'name': action, 'args': [], 'event_args': False}\n # we need to parse the arguments\n i_close = action.rfind(')')\n if i_close is -1:\n raise Exception('Bracket in argument opened but not closed.')\n action_name = action[:i_open]\n arglist = action[i_open+1:i_close].strip()\n if not arglist:\n # no arglist, just return method name\n return {'name': action_name, 'args': [], 'event_args': False}\n if '*' in arglist:\n return {'name': action_name, 'args': [], 'event_args': True}\n return {'name': action_name, 'args': _parse_arg_list(arglist), 'event_args': False}"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nparse a list of actions from the effect attribute of .", "response": "def _parse_action_list_attribute(attribute):\n \"\"\"\n Parses a list of actions, as found in the effect attribute of\n transitions, and the enry and exit actions of states.\n\n Actions are separated by a semicolon, surrounded by any amount of\n whitespace. 
A action can have the following form:\n\n m; m(); m(True); m(*)\n\n The asterisk that the state machine should provide the args and\n kwargs from the incoming event.\n \"\"\"\n actions = []\n for action_call in attribute.split(';'):\n action_call = action_call.strip()\n if action_call: # string is not empty\n actions.append(_parse_action(action_call))\n return actions"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nproviding a snapshot of the current status.", "response": "def print_status(self):\n \"\"\"Provide a snapshot of the current status.\"\"\"\n s = []\n s.append('=== State Machines: ===\\n')\n for stm_id in Driver._stms_by_id:\n stm = Driver._stms_by_id[stm_id]\n s.append(' - {} in state {}\\n'.format(stm.id, stm.state))\n s.append('=== Events in Queue: ===\\n')\n for event in self._event_queue.queue:\n if event is not None:\n s.append(' - {} for {} with args:{} kwargs:{}\\n'.format(\n event['id'], event['stm'].id,\n event['args'], event['kwargs']))\n s.append('=== Active Timers: {} ===\\n'.format(len(self._timer_queue)))\n for timer in self._timer_queue:\n s.append(' - {} for {} with timeout {}\\n'.format(\n timer['id'], timer['stm'].id, timer['timeout']))\n s.append('=== ================ ===\\n')\n return ''.join(s)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef add_machine(self, machine):\n self._logger.debug('Adding machine {} to driver'.format(machine.id))\n machine._driver = self\n machine._reset()\n if machine.id is not None:\n # TODO warning when STM already registered\n Driver._stms_by_id[machine.id] = machine\n self._add_event(event_id=None, args=[], kwargs={}, stm=machine)", "response": "Add the state machine to this driver."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef start(self, max_transitions=None, keep_active=False):\n self._active = True\n self._max_transitions = 
max_transitions\n self._keep_active = keep_active\n self.thread = Thread(target=self._start_loop)\n self.thread.start()", "response": "Start the driver.\n\n This method creates a thread which runs the event loop.\n The method returns immediately. To wait until the driver\n finishes, use `stmpy.Driver.wait_until_finished`.\n\n `max_transitions`: execute only this number of transitions, then stop\n `keep_active`: When true, keep the driver running even when all state\n machines terminated"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef wait_until_finished(self):\n try:\n self.thread.join()\n except KeyboardInterrupt:\n self._logger.debug('Keyboard interrupt detected, stopping driver.')\n self._active = False\n self._wake_queue()", "response": "Blocking method to wait until the driver finished execution."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nchecking for expired timers.", "response": "def _check_timers(self):\n \"\"\"\n Check for expired timers.\n\n If there are any timers that expired, place them in the event\n queue.\n \"\"\"\n if self._timer_queue:\n timer = self._timer_queue[0]\n if timer['timeout_abs'] < _current_time_millis():\n # the timer is expired, remove first element in queue\n self._timer_queue.pop(0)\n # put into the event queue\n self._logger.debug('Timer {} expired for stm {}, adding it to event queue.'.format(timer['id'], timer['stm'].id))\n self._add_event(timer['id'], [], {}, timer['stm'], front=True)\n # not necessary to set next timeout,\n # complete check timers will be called again\n else:\n self._next_timeout = (\n timer['timeout_abs'] - _current_time_millis()) / 1000\n if self._next_timeout < 0:\n self._next_timeout = 0\n else:\n self._next_timeout = None"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsending a message to a state machine.", "response": "def send(self, message_id, stm_id, args=[], kwargs={}):\n 
\"\"\"\n Send a message to a state machine handled by this driver.\n\n If you have a reference to the state machine, you can also send it\n directly to it by using `stmpy.Machine.send`.\n\n `stm_id` must be the id of a state machine earlier added to the driver.\n \"\"\"\n if stm_id not in Driver._stms_by_id:\n self._logger.warn('Machine with name {} cannot be found. '\n 'Ignoring message {}.'.format(stm_id, message_id))\n else:\n stm = Driver._stms_by_id[stm_id]\n self._add_event(message_id, args, kwargs, stm)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nstarts a timer or restart an active one.", "response": "def start_timer(self, timer_id, timeout):\n \"\"\"\n Start a timer or restart an active one.\n\n The timeout is given in milliseconds. If a timer with the\n same name already exists, it is restarted with the specified timeout.\n Note that the timeout is intended as the minimum time until the timer's\n expiration, but may vary due to the state of the event queue and the\n load of the system.\n \"\"\"\n self._logger.debug('Start timer {}\u00a0in stm {}'.format(timer_id, self.id))\n self._driver._start_timer(timer_id, timeout, self)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nstop a timer. 
If the timer is not active, nothing happens.", "response": "def stop_timer(self, timer_id):\n \"\"\"\n Stop a timer.\n\n If the timer is not active, nothing happens.\n \"\"\"\n self._logger.debug('Stop timer {}\u00a0in stm {}'.format(timer_id, self.id))\n self._driver._stop_timer(timer_id, self)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsends a message to this state machine.", "response": "def send(self, message_id, args=[], kwargs={}):\n \"\"\"\n Send a message to this state machine.\n\n To send a message to a state machine by its name, use\n `stmpy.Driver.send` instead.\n \"\"\"\n self._logger.debug('Send {}\u00a0in stm {}'.format(message_id, self.id))\n self._driver._add_event(\n event_id=message_id, args=args, kwargs=kwargs, stm=self)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef datatype(self, value):\n self._datatype = self.validate_datatype(value)\n self._cutout_ready = True", "response": "Sets the datatype of the object."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list(self, resource):\n return self.service.list(\n resource, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "List the metadata keys associated with the given resource."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef list_groups(self, filtr=None):\n return self.service.list_groups(\n filtr, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "Returns a list of the groups the logged in user is a member of."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_group(self, name, user_name=None):\n return self.service.get_group(\n name, user_name, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "Get 
owner of group and the resources it s attached to."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a new group.", "response": "def create_group(self, name):\n \"\"\"Create a new group.\n\n Args:\n name (string): Name of the group to create.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.service.create_group(\n name, self.url_prefix, self.auth, self.session,\n self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ndelete given group. Args: name (string): Name of group. Raises: requests.HTTPError on failure.", "response": "def delete_group(self, name):\n \"\"\"Delete given group.\n\n Args:\n name (string): Name of group.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.service.delete_group(\n name, self.url_prefix, self.auth, self.session,\n self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_group_members(self, name):\n return self.service.list_group_members(\n name, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "Get the members of a group."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets the maintainers of a group.", "response": "def list_group_maintainers(self, name):\n \"\"\"Get the maintainers of a group.\n\n Args:\n name (string): Name of group to query.\n\n Returns:\n (list[string]): List of maintainer names.\n \"\"\"\n return self.service.list_group_maintainers(\n name, self.url_prefix, self.auth, self.session,\n self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef add_group_maintainer(self, name, user):\n self.service.add_group_maintainer(\n name, user, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "Add the given user to the named group."} {"SOURCE": 
"codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef delete_group_maintainer(self, grp_name, user):\n self.service.delete_group_maintainer(\n grp_name, user, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "Delete the given user from the named group."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nlisting permissions associated with group and resource.", "response": "def list_permissions(self, group_name=None, resource=None):\n \"\"\"List permission sets associated filtering by group and/or resource.\n\n Args:\n group_name (string): Name of group.\n resource (intern.resource.boss.Resource): Identifies which data model object to operate on.\n\n Returns:\n (list): List of permissions.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n return self.service.list_permissions(group_name, resource,\n self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nadd additional permissions for the given resource.", "response": "def add_permissions(self, grp_name, resource, permissions):\n \"\"\" Add additional permissions for the group associated with the given resource.\n\n Args:\n grp_name (string): Name of group.\n resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.\n permissions (list): List of permissions to add to the given resource.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.service.add_permissions(\n grp_name, resource, permissions,\n self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef delete_permissions(self, grp_name, resource):\n self.service.delete_permissions(\n grp_name, resource, self.url_prefix, self.auth, self.session, self.session_send_opts)", "response": "Removes 
permissions from the group for the given resource."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nadd role to given user.", "response": "def add_user_role(self, user, role):\n \"\"\"Add role to given user.\n\n Args:\n user (string): User name.\n role (string): Role to assign.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.service.add_user_role(\n user, role,\n self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ngets roles associated with the given user.", "response": "def get_user_roles(self, user):\n \"\"\"Get roles associated with the given user.\n\n Args:\n user (string): User name.\n\n Returns:\n (list): List of roles that user has.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n return self.service.get_user_roles(\n user, self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef add_user(\n self, user, first_name=None, last_name=None, email=None, password=None):\n \"\"\"Add a new user.\n\n Args:\n user (string): User name.\n first_name (optional[string]): User's first name. Defaults to None.\n last_name (optional[string]): User's last name. Defaults to None.\n email: (optional[string]): User's email address. Defaults to None.\n password: (optional[string]): User's password. 
Defaults to None.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.service.add_user(\n user, first_name, last_name, email, password,\n self.url_prefix, self.auth, self.session, self.session_send_opts)", "response": "Adds a user to the cache."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_user(self, user):\n return self.service.get_user(\n user, self.url_prefix, self.auth, self.session, self.session_send_opts)", "response": "Get user s data."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndelete the given user.", "response": "def delete_user(self, user):\n \"\"\"Delete the given user.\n\n Args:\n user (string): User name.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.service.delete_user(\n user, self.url_prefix, self.auth, self.session, self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create(self, resource):\n return self.service.create(\n resource, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "response": "Create a new resource."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget attributes of the data model object named by the given resource.", "response": "def get(self, resource):\n \"\"\"Get attributes of the data model object named by the given resource.\n\n Args:\n resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed.\n\n Returns:\n (intern.resource.boss.BossResource): Returns resource of type requested on success.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n return self.service.get(\n resource, self.url_prefix, self.auth, self.session,\n self.session_send_opts)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef snap_to_cube(q_start, q_stop, 
chunk_depth=16, q_index=1):\n lo = 0\n hi = 0\n # Start by indexing everything at zero for our own sanity\n q_start -= q_index\n q_stop -= q_index\n\n if q_start % chunk_depth == 0:\n lo = q_start\n else:\n lo = q_start - (q_start % chunk_depth)\n\n if q_stop % chunk_depth == 0:\n hi = q_stop\n else:\n hi = q_stop + (chunk_depth - q_stop % chunk_depth)\n\n return [lo + q_index, hi + q_index + 1]", "response": "This function takes in a q - range and returns a 1D bound that starts at a cube boundary and ends at another cube boundary and includes the volume\n inside the bounds."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncomputes the bounding box of small cutouts.", "response": "def block_compute(x_start, x_stop,\n y_start, y_stop,\n z_start, z_stop,\n origin=(0, 0, 0),\n block_size=(512, 512, 16)):\n \"\"\"\n Get bounding box coordinates (in 3D) of small cutouts to request in\n order to reconstitute a larger cutout.\n\n Arguments:\n x_start (int): The lower bound of dimension x\n x_stop (int): The upper bound of dimension x\n y_start (int): The lower bound of dimension y\n y_stop (int): The upper bound of dimension y\n z_start (int): The lower bound of dimension z\n z_stop (int): The upper bound of dimension z\n\n\n Returns:\n [((x_start, x_stop), (y_start, y_stop), (z_start, z_stop)), ... 
]\n \"\"\"\n # x\n x_bounds = range(origin[0], x_stop + block_size[0], block_size[0])\n x_bounds = [x for x in x_bounds if (x > x_start and x < x_stop)]\n if len(x_bounds) is 0:\n x_slices = [(x_start, x_stop)]\n else:\n x_slices = []\n for start_x in x_bounds[:-1]:\n x_slices.append((start_x, start_x + block_size[0]))\n x_slices.append((x_start, x_bounds[0]))\n x_slices.append((x_bounds[-1], x_stop))\n\n # y\n y_bounds = range(origin[1], y_stop + block_size[1], block_size[1])\n y_bounds = [y for y in y_bounds if (y > y_start and y < y_stop)]\n if len(y_bounds) is 0:\n y_slices = [(y_start, y_stop)]\n else:\n y_slices = []\n for start_y in y_bounds[:-1]:\n y_slices.append((start_y, start_y + block_size[1]))\n y_slices.append((y_start, y_bounds[0]))\n y_slices.append((y_bounds[-1], y_stop))\n\n # z\n z_bounds = range(origin[2], z_stop + block_size[2], block_size[2])\n z_bounds = [z for z in z_bounds if (z > z_start and z < z_stop)]\n if len(z_bounds) is 0:\n z_slices = [(z_start, z_stop)]\n else:\n z_slices = []\n for start_z in z_bounds[:-1]:\n z_slices.append((start_z, start_z + block_size[2]))\n z_slices.append((z_start, z_bounds[0]))\n z_slices.append((z_bounds[-1], z_stop))\n\n # alright, yuck. but now we have {x, y, z}_slices, each of which hold the\n # start- and end-points of each cube-aligned boundary. 
For instance, if you\n # requested z-slices 4 through 20, it holds [(4, 16), (16, 20)].\n\n # For my next trick, I'll convert these to a list of:\n # ((x_start, x_stop), (y_start, y_stop), (z_start, z_stop))\n\n chunks = []\n for x in x_slices:\n for y in y_slices:\n for z in z_slices:\n chunks.append((x, y, z))\n return chunks"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ninitializing the Project Service from the config file.", "response": "def _init_project_service(self, version):\n \"\"\"\n Method to initialize the Project Service from the config data\n\n Args:\n version (string): Version of Boss API to use.\n\n Returns:\n None\n\n Raises:\n (KeyError): if given invalid version.\n \"\"\"\n project_cfg = self._load_config_section(CONFIG_PROJECT_SECTION)\n self._token_project = project_cfg[CONFIG_TOKEN]\n proto = project_cfg[CONFIG_PROTOCOL]\n host = project_cfg[CONFIG_HOST]\n\n self._project = ProjectService(host, version)\n self._project.base_protocol = proto\n self._project.set_auth(self._token_project)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _init_metadata_service(self, version):\n metadata_cfg = self._load_config_section(CONFIG_METADATA_SECTION)\n self._token_metadata = metadata_cfg[CONFIG_TOKEN]\n proto = metadata_cfg[CONFIG_PROTOCOL]\n host = metadata_cfg[CONFIG_HOST]\n\n self._metadata = MetadataService(host, version)\n self._metadata.base_protocol = proto\n self._metadata.set_auth(self._token_metadata)", "response": "Method to initialize the Metadata Service from the config file."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ninitialize the Volume Service from the config file.", "response": "def _init_volume_service(self, version):\n \"\"\"\n Method to initialize the Volume Service from the config data\n\n Args:\n version (string): Version of Boss API to use.\n\n Returns:\n None\n\n Raises:\n (KeyError): if given invalid version.\n \"\"\"\n 
volume_cfg = self._load_config_section(CONFIG_VOLUME_SECTION)\n self._token_volume = volume_cfg[CONFIG_TOKEN]\n proto = volume_cfg[CONFIG_PROTOCOL]\n host = volume_cfg[CONFIG_HOST]\n\n self._volume = VolumeService(host, version)\n self._volume.base_protocol = proto\n self._volume.set_auth(self._token_volume)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _load_config_section(self, section_name):\n if self._config.has_section(section_name):\n # Load specific section\n section = dict(self._config.items(section_name))\n elif self._config.has_section(\"Default\"):\n # Load Default section\n section = dict(self._config.items(\"Default\"))\n else:\n raise KeyError((\n \"'{}' was not found in the configuration file and no default \" +\n \"configuration was provided.\"\n ).format(section_name))\n\n # Make sure section is valid\n if \"protocol\" in section and \"host\" in section and \"token\" in section:\n return section\n else:\n raise KeyError(\n \"Missing values in configuration data. \" +\n \"Must contain: protocol, host, token\"\n )", "response": "Method to load the specific Service section from the config file if it exists or fall back to the default Service section if it does not exist."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef list_groups(self, filtr=None):\n self.project_service.set_auth(self._token_project)\n return self.project_service.list_groups(filtr)", "response": "Returns a list of the groups the logged in user is a member of. 
Optionally filter by member or maintainer."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ngets information on the given group or whether or not a user is a member of the given group.", "response": "def get_group(self, name, user_name=None):\n \"\"\"\n Get information on the given group or whether or not a user is a member\n of the group.\n\n Args:\n name (string): Name of group to query.\n user_name (optional[string]): Supply None if not interested in\n determining if user is a member of the given group.\n\n Returns:\n (mixed): Dictionary if getting group information or bool if a user\n name is supplied.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_group(name, user_name)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_group(self, name):\n self.project_service.set_auth(self._token_project)\n return self.project_service.create_group(name)", "response": "Creates a new group."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ndelete given group. Args: name (string): Name of group. Returns: (bool): True on success. 
Raises: requests.HTTPError on failure.", "response": "def delete_group(self, name):\n \"\"\"\n Delete given group.\n\n Args:\n name (string): Name of group.\n\n Returns:\n (bool): True on success.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.delete_group(name)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef list_group_members(self, name):\n self.project_service.set_auth(self._token_project)\n return self.project_service.list_group_members(name)", "response": "Returns the members of a group."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nadd the given user to the named group.", "response": "def add_group_member(self, grp_name, user):\n \"\"\"\n Add the given user to the named group.\n\n Both group and user must already exist for this to succeed.\n\n Args:\n name (string): Name of group.\n user_name (string): User to add to group.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.add_group_member(grp_name, user)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes the given user from the named group.", "response": "def delete_group_member(self, grp_name, user):\n \"\"\"\n Delete the given user to the named group.\n\n Both group and user must already exist for this to succeed.\n\n Args:\n name (string): Name of group.\n user_name (string): User to delete from the group.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.delete_group_member(grp_name, user)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_is_group_member(self, grp_name, user):\n self.project_service.set_auth(self._token_project)\n return 
self.project_service.get_is_group_member(grp_name, user)", "response": "Checks if the given user is a member of the named group."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_group_maintainers(self, name):\n self.project_service.set_auth(self._token_project)\n return self.project_service.list_group_maintainers(name)", "response": "Returns a list of the names of the maintainers of a group."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nadding the given user to the named group.", "response": "def add_group_maintainer(self, name, user):\n \"\"\"\n Add the given user to the named group.\n\n Both group and user must already exist for this to succeed.\n\n Args:\n name (string): Name of group.\n user (string): User to add to group.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.add_group_maintainer(name, user)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef delete_group_maintainer(self, grp_name, user):\n self.project_service.set_auth(self._token_project)\n self.project_service.delete_group_maintainer(grp_name, user)", "response": "Deletes the given user from the named group."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nchecks if the given user is a member of the named group.", "response": "def get_is_group_maintainer(self, grp_name, user):\n \"\"\"\n Check if the given user is a member of the named group.\n\n Args:\n name (string): Name of group.\n user (string): User of interest.\n\n Returns:\n (bool): False if user not a member.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_is_group_maintainer(grp_name, user)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a list of permissions associated with 
group and resource.", "response": "def list_permissions(self, group_name=None, resource=None):\n \"\"\"\n List permission sets associated filtering by group and/or resource.\n\n Args:\n group_name (string): Name of group.\n resource (intern.resource.boss.Resource): Identifies which data\n model object to operate on.\n\n Returns:\n (list): List of permissions.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.list_permissions(group_name, resource)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngets permissions associated with the given group with the given resource.", "response": "def get_permissions(self, grp_name, resource):\n \"\"\"\n Get permissions associated the group has with the given resource.\n\n Args:\n grp_name (string): Name of group.\n resource (intern.resource.boss.Resource): Identifies which data\n model object to operate on.\n\n Returns:\n (list): List of permissions.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_permissions(grp_name, resource)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef add_permissions(self, grp_name, resource, permissions):\n self.project_service.set_auth(self._token_project)\n self.project_service.add_permissions(grp_name, resource, permissions)", "response": "Adds additional permissions for the group associated with the resource."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update_permissions(self, grp_name, resource, permissions):\n self.project_service.set_auth(self._token_project)\n self.project_service.update_permissions(grp_name, resource, permissions)", "response": "Updates the permissions for the given group associated with the given resource."} {"SOURCE": 
"codesearchnet", "instruction": "Can you implement a function in Python 3 that\nremoves permissions from the group for the given resource.", "response": "def delete_permissions(self, grp_name, resource):\n \"\"\"\n Removes permissions from the group for the given resource.\n\n Args:\n grp_name (string): Name of group.\n resource (intern.resource.boss.Resource): Identifies which data\n model object to operate on.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.delete_permissions(grp_name, resource)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngetting roles associated with the given user.", "response": "def get_user_roles(self, user):\n \"\"\"\n Get roles associated with the given user.\n\n Args:\n user (string): User name.\n\n Returns:\n (list): List of roles that user has.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_user_roles(user)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nadding role to given user.", "response": "def add_user_role(self, user, role):\n \"\"\"\n Add role to given user.\n\n Args:\n user (string): User name.\n role (string): Role to assign.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.add_user_role(user, role)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nremoving role from given user.", "response": "def delete_user_role(self, user, role):\n \"\"\"\n Remove role from given user.\n\n Args:\n user (string): User name.\n role (string): Role to remove.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.delete_user_role(user, role)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where 
it\ngets user s data.", "response": "def get_user(self, user):\n \"\"\"\n Get user's data (first and last name, email, etc).\n\n Args:\n user (string): User name.\n\n Returns:\n (dictionary): User's data encoded in a dictionary.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_user(user)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nget user s group memberships.", "response": "def get_user_groups(self, user):\n \"\"\"\n Get user's group memberships.\n\n Args:\n user (string): User name.\n\n Returns:\n (list): User's groups.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_user_groups(user)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nadding a user to the cache.", "response": "def add_user(\n self, user,\n first_name=None, last_name=None,\n email=None, password=None\n ):\n \"\"\"\n Add a new user.\n\n Args:\n user (string): User name.\n first_name (optional[string]): User's first name. Defaults to None.\n last_name (optional[string]): User's last name. Defaults to None.\n email: (optional[string]): User's email address. Defaults to None.\n password: (optional[string]): User's password. 
Defaults to None.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.add_user(\n user, first_name, last_name, email, password)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ndeletes the given user from the cache.", "response": "def delete_user(self, user):\n \"\"\"\n Delete the given user.\n\n Args:\n user (string): User name.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.delete_user(user)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nlist all instances of the given resource type.", "response": "def _list_resource(self, resource):\n \"\"\"\n List all instances of the given resource type.\n\n Use the specific list_() methods instead:\n list_collections()\n list_experiments()\n list_channels()\n list_coordinate_frames()\n\n Args:\n resource (intern.resource.boss.BossResource): resource.name may be\n an empty string.\n\n Returns:\n (list)\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return super(BossRemote, self).list_project(resource=resource)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef list_experiments(self, collection_name):\n exp = ExperimentResource(\n name='', collection_name=collection_name, coord_frame='foo')\n return self._list_resource(exp)", "response": "List all experiments in a parent collection."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nlist all channels belonging to the named experiment that is part of the named collection.", "response": "def list_channels(self, collection_name, experiment_name):\n \"\"\"\n List all channels belonging to the named experiment that is part\n of the named collection.\n\n Args:\n collection_name (string): Name of the parent 
collection.\n experiment_name (string): Name of the parent experiment.\n\n Returns:\n (list)\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n dont_care = 'image'\n chan = ChannelResource(\n name='', collection_name=collection_name,\n experiment_name=experiment_name, type=dont_care)\n return self._list_resource(chan)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_channel(self, chan_name, coll_name, exp_name):\n chan = ChannelResource(chan_name, coll_name, exp_name)\n return self.get_project(chan)", "response": "Helper that gets a fully initialized ChannelResource for an existing channel."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate the project described by the given resource.", "response": "def create_project(self, resource):\n \"\"\"\n Create the entity described by the given resource.\n\n Args:\n resource (intern.resource.boss.BossResource)\n\n Returns:\n (intern.resource.boss.BossResource): Returns resource of type\n requested on success.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.create(resource)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_project(self, resource):\n self.project_service.set_auth(self._token_project)\n return self.project_service.get(resource)", "response": "Get the attributes of the data model object named by the given resource."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the project with the given resource.", "response": "def update_project(self, resource_name, resource):\n \"\"\"\n Updates an entity in the data model using the given resource.\n\n Args:\n resource_name (string): Current name of the resource (in case the\n resource is getting its name changed).\n resource (intern.resource.boss.BossResource): New 
attributes for\n the resource.\n\n Returns:\n (intern.resource.boss.BossResource): Returns updated resource of\n given type on success.\n\n Raises:\n requests.HTTPError on failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n return self.project_service.update(resource_name, resource)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ndelete the entity described by the given resource.", "response": "def delete_project(self, resource):\n \"\"\"\n Deletes the entity described by the given resource.\n\n Args:\n resource (intern.resource.boss.BossResource)\n\n Raises:\n requests.HTTPError on a failure.\n \"\"\"\n self.project_service.set_auth(self._token_project)\n self.project_service.delete(resource)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_metadata(self, resource):\n self.metadata_service.set_auth(self._token_metadata)\n return self.metadata_service.list(resource)", "response": "List all keys associated with the given resource."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create_metadata(self, resource, keys_vals):\n self.metadata_service.set_auth(self._token_metadata)\n self.metadata_service.create(resource, keys_vals)", "response": "Creates new key - value pairs for the given resource."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_metadata(self, resource, keys):\n self.metadata_service.set_auth(self._token_metadata)\n return self.metadata_service.get(resource, keys)", "response": "Gets the values associated with the given keys associated with the given resource."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update_metadata(self, resource, keys_vals):\n self.metadata_service.set_auth(self._token_metadata)\n self.metadata_service.update(resource, 
keys_vals)", "response": "Updates the metadata for the given resource."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef delete_metadata(self, resource, keys):\n self.metadata_service.set_auth(self._token_metadata)\n self.metadata_service.delete(resource, keys)", "response": "Deletes the given key - value pairs associated with the given resource."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef parse_bossURI(self, uri): # type: (str) -> Resource\n t = uri.split(\"://\")[1].split(\"/\")\n if len(t) is 3:\n return self.get_channel(t[2], t[0], t[1])\n raise ValueError(\"Cannot parse URI \" + uri + \".\")", "response": "Parse a bossDB URI and handle malform errors."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_cutout(self, resource, resolution, x_range, y_range, z_range, time_range=None, id_list=[], no_cache=None, access_mode=CacheMode.no_cache, **kwargs):\n if no_cache is not None:\n warnings.warn(\"The no-cache option has been deprecated and will not be used in future versions of intern.\")\n warnings.warn(\"Please from intern.service.boss.volume import CacheMode and use access_mode=CacheMode.[cache,no-cache,raw] instead.\")\n if no_cache and access_mode != CacheMode.no_cache:\n warnings.warn(\"Both no_cache and access_mode were used, please use access_mode only. As no_cache has been deprecated. 
\")\n warnings.warn(\"Your request will be made using the default mode no_cache.\")\n access_mode=CacheMode.no_cache \n if no_cache:\n access_mode=CacheMode.no_cache\n elif no_cache == False:\n access_mode=CacheMode.cache\n return self._volume.get_cutout(resource, resolution, x_range, y_range, z_range, time_range, id_list, access_mode, **kwargs)", "response": "Get a cutout from the volume service."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning a project object for the given experiment name", "response": "def get_experiment(self, coll_name, exp_name):\n \"\"\"\n Convenience method that gets experiment resource.\n \n Args:\n coll_name (str): Collection name\n exp_name (str): Experiment name\n\n Returns:\n (ExperimentResource)\n \"\"\"\n exp = ExperimentResource(exp_name, coll_name)\n return self.get_project(exp)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):\n return self._volume.get_neuroglancer_link(resource, resolution, x_range, y_range, z_range, **kwargs)", "response": "Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef views(model: \"Model\") -> list:\n if not isinstance(model, Model):\n raise TypeError(\"Expected a Model, not %r.\" % model)\n return model._model_views[:]", "response": "Return a list of views keyed on what events they respond to."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nregister a method that reacts before the trigger method is called.", "response": "def before(self, callback: Union[Callable, str]) -> \"Control\":\n \"\"\"Register a control method that reacts before the trigger method is called.\n\n Parameters:\n callback:\n The control method. 
If given as a callable, then that function will be\n used as the callback. If given as a string, then the control will look\n up a method with that name when reacting (useful when subclassing).\n \"\"\"\n if isinstance(callback, Control):\n callback = callback._before\n self._before = callback\n return self"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nregisters a method that reacts after the trigger method is called.", "response": "def after(self, callback: Union[Callable, str]) -> \"Control\":\n \"\"\"Register a control method that reacts after the trigger method is called.\n\n Parameters:\n callback:\n The control method. If given as a callable, then that function will be\n used as the callback. If given as a string, then the control will look\n up a method with that name when reacting (useful when subclassing).\n \"\"\"\n if isinstance(callback, Control):\n callback = callback._after\n self._after = callback\n return self"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef serve_static(app, base_url, base_path, index=False):\n @app.route(base_url + '/(.*)')\n def serve(env, req):\n \"\"\"\n Static files\n \"\"\"\n try:\n base = pathlib.Path(base_path).resolve()\n path = (base / req.match.group(1)).resolve()\n except FileNotFoundError:\n return Response(None, 404, 'Not Found')\n\n\n # Don't let bad paths through\n if base == path or base in path.parents:\n if path.is_file():\n return ResponseFile(str(path))\n if index and path.is_dir():\n if base == path:\n ret = ''\n else:\n ret = '../
\\r\\n'\n for item in path.iterdir():\n name = item.parts[-1]\n if item.is_dir():\n name += '/'\n ret += '{}
\\r\\n'.format(urllib.parse.quote(name), html.escape(name))\n ret = ResponseString(ret, 'text/html')\n return ret\n\n return Response(None, 404, 'Not Found')", "response": "Serve a directory statically"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nserves API documentation extracted from request handler docstrings", "response": "def serve_doc(app, url):\n \"\"\"\n Serve API documentation extracted from request handler docstrings\n\n Parameters:\n * app: Grole application object\n * url: URL to serve at\n \"\"\"\n @app.route(url, doc=False)\n def index(env, req):\n ret = ''\n for d in env['doc']:\n ret += 'URL: {url}, supported methods: {methods}{doc}\\n'.format(**d)\n return ret"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nparse command line arguments for Grole server running as static file server", "response": "def parse_args(args=sys.argv[1:]):\n \"\"\"\n Parse command line arguments for Grole server running as static file server\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--address', help='address to listen on, default localhost',\n default='localhost')\n parser.add_argument('-p', '--port', help='port to listen on, default 1234',\n default=1234, type=int)\n parser.add_argument('-d', '--directory', help='directory to serve, default .',\n default='.')\n parser.add_argument('-n', '--noindex', help='do not show directory indexes',\n default=False, action='store_true')\n loglevel = parser.add_mutually_exclusive_group()\n loglevel.add_argument('-v', '--verbose', help='verbose logging',\n default=False, action='store_true')\n loglevel.add_argument('-q', '--quiet', help='quiet logging',\n default=False, action='store_true')\n return parser.parse_args(args)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nrunning Grole static file server", "response": "def main(args=sys.argv[1:]):\n \"\"\"\n Run Grole static 
file server\n \"\"\"\n args = parse_args(args)\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n elif args.quiet:\n logging.basicConfig(level=logging.ERROR)\n else:\n logging.basicConfig(level=logging.INFO)\n app = Grole()\n serve_static(app, '', args.directory, not args.noindex)\n app.run(args.address, args.port)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\nasync def _read(self, reader):\n start_line = await self._readline(reader)\n self.method, self.location, self.version = start_line.decode().split()\n path_query = urllib.parse.unquote(self.location).split('?', 1)\n self.path = path_query[0]\n self.query = {}\n if len(path_query) > 1:\n for q in path_query[1].split('&'):\n try:\n k, v = q.split('=', 1)\n self.query[k] = v\n except ValueError:\n self.query[q] = None\n self.headers = {}\n while True:\n header_raw = await self._readline(reader)\n if header_raw.strip() == b'':\n break\n header = header_raw.decode().split(':', 1)\n self.headers[header[0]] = header[1].strip()\n\n # TODO implement chunked handling\n self.data = b''\n await self._buffer_body(reader)", "response": "Reads the HTTP request into the internal state."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\nasync def _readline(self, reader):\n ret = await reader.readline()\n if len(ret) == 0 and reader.at_eof():\n raise EOFError()\n return ret", "response": "readline is a readline function that returns a string"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\nasync def _buffer_body(self, reader):\n remaining = int(self.headers.get('Content-Length', 0))\n if remaining > 0:\n try:\n self.data = await reader.readexactly(remaining)\n except asyncio.IncompleteReadError:\n raise EOFError()", "response": "Buffers the body of the request."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 
code\nasync def _handle(self, reader, writer):\n peer = writer.get_extra_info('peername')\n self._logger.debug('New connection from {}'.format(peer))\n try:\n # Loop handling requests\n while True:\n # Read the request\n req = Request()\n await req._read(reader)\n\n # Find and execute handler\n res = None\n for path_regex, handler in self._handlers.get(req.method, []):\n match = path_regex.fullmatch(req.path)\n if match:\n req.match = match\n try:\n if inspect.iscoroutinefunction(handler):\n res = await handler(self.env, req)\n else:\n res = handler(self.env, req)\n if not isinstance(res, Response):\n res = Response(data=res)\n except:\n # Error - log it and return 500\n self._logger.error(traceback.format_exc())\n res = Response(code=500, reason='Internal Server Error')\n break\n\n # No handler - send 404\n if res == None:\n res = Response(code=404, reason='Not Found')\n\n # Respond\n await res._write(writer)\n self._logger.info('{}: {} -> {}'.format(peer, req.path, res.code))\n except EOFError:\n self._logger.debug('Connection closed from {}'.format(peer))\n except Exception as e:\n self._logger.error('Connection error ({}) from {}'.format(e, peer))\n writer.close()", "response": "Handle a single TCP connection."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef run(self, host='localhost', port=1234):\n # Setup loop\n loop = asyncio.get_event_loop()\n coro = asyncio.start_server(self._handle, host, port, loop=loop)\n try:\n server = loop.run_until_complete(coro)\n except Exception as e:\n self._logger.error('Could not launch server: {}'.format(e))\n return\n\n # Run the server\n self._logger.info('Serving on {}'.format(server.sockets[0].getsockname()))\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n\n # Close the server\n server.close()\n loop.run_until_complete(server.wait_closed())\n loop.close()", "response": "Launch the server. 
Will run forever accepting connections until interrupted."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef hold(model: Model, reducer: Optional[Callable] = None) -> Iterator[list]:\n if not isinstance(model, Model):\n raise TypeError(\"Expected a Model, not %r.\" % model)\n events = []\n restore = model.__dict__.get(\"_notify_model_views\")\n model._notify_model_views = lambda e: events.extend(e)\n\n try:\n yield events\n finally:\n if restore is None:\n del model._notify_model_views\n else:\n model._notify_model_views = restore\n\n events = tuple(events)\n\n if reducer is not None:\n events = tuple(map(Data, reducer(model, events)))\n\n model._notify_model_views(events)", "response": "Returns an iterator over the events produced by the given model that are currently in the hold context."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning an iterator that yields the list of events that have not been performed.", "response": "def rollback(\n model: Model, undo: Optional[Callable] = None, *args, **kwargs\n) -> Iterator[list]:\n \"\"\"Withold events if an error occurs.\n\n Generall operate\n\n Parameters:\n model:\n The model object whose change events may be witheld.\n undo:\n An optional function for reversing any changes that may have taken place.\n Its signature is ``(model, events, error)`` where ``model`` is the given\n model, ``event`` is a tuple of all the events that took place, and ``error``\n is the exception that was riased. Any changes that you make to the model\n within this function will not produce events.\n\n Examples:\n\n Simple supression of events:\n\n .. 
code-block:: python\n\n from spectate import mvc\n\n d = mvc.Dict()\n\n @mvc.view(d)\n def should_not_be_called(d, events):\n # we never call this view\n assert False\n\n try:\n with mvc.rollback(d):\n d[\"a\"] = 1\n d[\"b\"] # key doesn't exist\n except KeyError:\n pass\n\n Undo changes for a dictionary:\n\n .. code-block:: python\n\n from spectate import mvc\n\n def undo_dict_changes(model, events, error):\n seen = set()\n for e in reversed(events):\n if e.old is mvc.Undefined:\n del model[e.key]\n else:\n model[e.key] = e.old\n\n try:\n with mvc.rollback(d, undo=undo_dict_changes):\n d[\"a\"] = 1\n d[\"b\"] = 2\n print(d)\n d[\"c\"]\n except KeyError:\n pass\n print(d)\n\n .. code-block:: python\n\n {'a': 1, 'b': 2}\n {}\n \"\"\"\n with hold(model, *args, **kwargs) as events:\n try:\n yield events\n except Exception as error:\n if undo is not None:\n with mute(model):\n undo(model, tuple(events), error)\n events.clear()\n raise"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nblock a model s views from being notified.", "response": "def mute(model: Model):\n \"\"\"Block a model's views from being notified.\n\n All changes within a \"mute\" context will be blocked. No content is yielded to the\n user as in :func:`hold`, and the views of the model are never notified that changes\n took place.\n\n Parameters:\n mode: The model whose change events will be blocked.\n\n Examples:\n\n The view is never called due to the :func:`mute` context:\n\n .. 
code-block:: python\n\n from spectate import mvc\n\n l = mvc.List()\n\n @mvc.view(l)\n def raises(events):\n raise ValueError(\"Events occured!\")\n\n with mvc.mute(l):\n l.append(1)\n \"\"\"\n if not isinstance(model, Model):\n raise TypeError(\"Expected a Model, not %r.\" % model)\n restore = model.__dict__.get(\"_notify_model_views\")\n model._notify_model_views = lambda e: None\n try:\n yield\n finally:\n if restore is None:\n del model._notify_model_views\n else:\n model._notify_model_views = restore"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a new type with certain methods exposed to callback registration.", "response": "def expose_as(name, base, *methods):\n \"\"\"Return a new type with certain methods that are exposed to callback registration.\n\n Parameters\n ----------\n name : str\n The name of the new type.\n base : type\n A type such as list or dict.\n *methods : str\n A str representation of the methods that should be exposed to callbacks.\n\n Returns\n -------\n exposed : obj:\n A :class:`Watchable` with methods that will notify a :class:`Spectator`.\n \"\"\"\n classdict = {}\n for method in methods:\n if not hasattr(base, method):\n raise AttributeError(\n \"Cannot expose '%s', because '%s' \"\n \"instances lack this method\" % (method, base.__name__)\n )\n else:\n classdict[method] = MethodSpectator(getattr(base, method), method)\n return type(name, (base, Watchable), classdict)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns True if the given value is a : class : Watchable subclass or instance.", "response": "def watchable(value):\n \"\"\"Returns True if the given value is a :class:`Watchable` subclass or instance.\"\"\"\n check = issubclass if inspect.isclass(value) else isinstance\n return check(value, Watchable)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef watch(value, 
spectator_type=Spectator):\n if isinstance(value, Watchable):\n wtype = type(value)\n else:\n raise TypeError(\"Expected a Watchable, not %r.\" % value)\n spectator = getattr(value, \"_instance_spectator\", None)\n if not isinstance(spectator, Spectator):\n spectator = spectator_type(wtype)\n value._instance_spectator = spectator\n return spectator", "response": "Register a new Specatator to a Watchable object and return it."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates and return a new watchable with its Specatator.", "response": "def watched(cls, *args, **kwargs):\n \"\"\"Create and return a :class:`Watchable` with its :class:`Specatator`.\n\n See :func:`watch` for more info on :class:`Specatator` registration.\n\n Parameters\n ----------\n cls: type:\n A subclass of :class:`Watchable`\n *args:\n Positional arguments used to create the instance\n **kwargs:\n Keyword arguments used to create the instance.\n \"\"\"\n value = cls(*args, **kwargs)\n return value, watch(value)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns the Specatator of a Watchable instance.", "response": "def unwatch(value):\n \"\"\"Return the :class:`Specatator` of a :class:`Watchable` instance.\"\"\"\n if not isinstance(value, Watchable):\n raise TypeError(\"Expected a Watchable, not %r.\" % value)\n spectator = watcher(value)\n try:\n del value._instance_spectator\n except Exception:\n pass\n return spectator"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef callback(self, name, before=None, after=None):\n if isinstance(name, (list, tuple)):\n for name in name:\n self.callback(name, before, after)\n else:\n if not isinstance(getattr(self.subclass, name), MethodSpectator):\n raise ValueError(\"No method specator for '%s'\" % name)\n if before is None and after is None:\n raise ValueError(\"No pre or post '%s' callbacks were 
given\" % name)\n elif before is not None and not callable(before):\n raise ValueError(\"Expected a callable, not %r.\" % before)\n elif after is not None and not callable(after):\n raise ValueError(\"Expected a callable, not %r.\" % after)\n elif before is None and after is None:\n raise ValueError(\"No callbacks were given.\")\n if name in self._callback_registry:\n callback_list = self._callback_registry[name]\n else:\n callback_list = []\n self._callback_registry[name] = callback_list\n callback_list.append((before, after))", "response": "Add a callback pair to this class."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nremove a callback pair from this Spectator.", "response": "def remove_callback(self, name, before=None, after=None):\n \"\"\"Remove a beforeback, and afterback pair from this Spectator\n\n If ``before`` and ``after`` are None then all callbacks for\n the given method will be removed. Otherwise, only the exact\n callback pair will be removed.\n\n Parameters\n ----------\n name: str\n The name of the method the callback pair is associated with.\n before: None or callable\n The beforeback that was originally registered to the given method.\n after: None or callable\n The afterback that was originally registered to the given method.\n \"\"\"\n if isinstance(name, (list, tuple)):\n for name in name:\n self.remove_callback(name, before, after)\n elif before is None and after is None:\n del self._callback_registry[name]\n else:\n if name in self._callback_registry:\n callback_list = self._callback_registry[name]\n else:\n callback_list = []\n self._callback_registry[name] = callback_list\n callback_list.remove((before, after))\n if len(callback_list) == 0:\n # cleanup if all callbacks are gone\n del self._callback_registry[name]"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef call(self, obj, name, method, args, kwargs):\n if name in self._callback_registry:\n 
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))\n\n hold = []\n for b in beforebacks:\n if b is not None:\n call = Data(name=name, kwargs=kwargs.copy(), args=args)\n v = b(obj, call)\n else:\n v = None\n hold.append(v)\n\n out = method(*args, **kwargs)\n\n for a, bval in zip(afterbacks, hold):\n if a is not None:\n a(obj, Data(before=bval, name=name, value=out))\n elif callable(bval):\n # the beforeback's return value was an\n # afterback that expects to be called\n bval(out)\n return out\n else:\n return method(*args, **kwargs)", "response": "Trigger a method along with its beforebacks and afterbacks."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nshorten given text on first delimiter after given number of characters.", "response": "def cut_to_length(text, length, delim):\n \"\"\"Shorten given text on first delimiter after given number\n of characters.\n \"\"\"\n cut = text.find(delim, length)\n if cut > -1:\n return text[:cut]\n else:\n return text"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_interpreter_path(version=None):\n if version and version != str(sys.version_info[0]):\n return settings.PYTHON_INTERPRETER + version\n else:\n return sys.executable", "response": "Return the executable of a specified or current version."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nfinding out the package license from list of trove classifiers.", "response": "def license_from_trove(trove):\n \"\"\"Finds out license from list of trove classifiers.\n Args:\n trove: list of trove classifiers\n Returns:\n Fedora name of the package license or empty string, if no licensing\n information is found in trove classifiers.\n \"\"\"\n license = []\n for classifier in trove:\n if 'License' in classifier:\n stripped = classifier.strip()\n # if taken from EGG-INFO, begins with Classifier:\n stripped = 
stripped[stripped.find('License'):]\n if stripped in settings.TROVE_LICENSES:\n license.append(settings.TROVE_LICENSES[stripped])\n return ' and '.join(license)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nfind out python version from list of trove classifiers.", "response": "def versions_from_trove(trove):\n \"\"\"Finds out python version from list of trove classifiers.\n Args:\n trove: list of trove classifiers\n Returns:\n python version string\n \"\"\"\n versions = set()\n for classifier in trove:\n if 'Programming Language :: Python ::' in classifier:\n ver = classifier.split('::')[-1]\n major = ver.split('.')[0].strip()\n if major:\n versions.add(major)\n return sorted(\n set([v for v in versions if v.replace('.', '', 1).isdigit()]))"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns an extension that extracts data from PyPI and merges them with data from extraction .", "response": "def pypi_metadata_extension(extraction_fce):\n \"\"\"Extracts data from PyPI and merges them with data from extraction\n method.\n \"\"\"\n\n def inner(self, client=None):\n data = extraction_fce(self)\n if client is None:\n logger.warning(\"Client is None, it was probably disabled\")\n data.update_attr('source0', self.archive.name)\n return data\n try:\n release_data = client.release_data(self.name, self.version)\n except BaseException:\n logger.warning(\"Some kind of error while communicating with \"\n \"client: {0}.\".format(client), exc_info=True)\n return data\n try:\n url, md5_digest = get_url(client, self.name, self.version)\n except exc.MissingUrlException:\n url, md5_digest = ('FAILED TO EXTRACT FROM PYPI',\n 'FAILED TO EXTRACT FROM PYPI')\n data_dict = {'source0': url, 'md5': md5_digest}\n\n for data_field in settings.PYPI_USABLE_DATA:\n data_dict[data_field] = release_data.get(data_field, '')\n\n # we usually get better license representation from trove classifiers\n 
data_dict[\"license\"] = license_from_trove(release_data.get(\n 'classifiers', ''))\n data.set_from(data_dict, update=True)\n return data\n return inner"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef venv_metadata_extension(extraction_fce):\n\n def inner(self):\n data = extraction_fce(self)\n if virtualenv is None or not self.venv:\n logger.debug(\"Skipping virtualenv metadata extraction.\")\n return data\n\n temp_dir = tempfile.mkdtemp()\n try:\n extractor = virtualenv.VirtualEnv(self.name, temp_dir,\n self.name_convertor,\n self.base_python_version)\n data.set_from(extractor.get_venv_data, update=True)\n except exc.VirtualenvFailException as e:\n logger.error(\"{}, skipping virtualenv metadata extraction.\".format(\n e))\n finally:\n shutil.rmtree(temp_dir)\n return data\n return inner", "response": "Returns a function that takes a virtualenv object and merges them with data from given extraction method."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef process_description(description_fce):\n def inner(description):\n clear_description = \\\n re.sub(r'\\s+', ' ', # multiple whitespaces\n # general URLs\n re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '',\n # delimiters\n re.sub('(#|=|---|~|`)*', '',\n # very short lines, typically titles\n re.sub('((\\r?\\n)|^).{0,8}((\\r?\\n)|$)', '',\n # PyPI's version and downloads tags\n re.sub(\n '((\\r*.. 
image::|:target:) https?|(:align:|:alt:))[^\\n]*\\n', '',\n description_fce(description))))))\n return ' '.join(textwrap.wrap(clear_description, 80))\n return inner", "response": "Removes special character delimiters titles and downloads tags and wraps paragraphs."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef versions_from_archive(self):\n py_vers = versions_from_trove(self.classifiers)\n return [ver for ver in py_vers if ver != self.unsupported_version]", "response": "Return a list of Python versions extracted from trove classifiers."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef srcname(self):\n if self.rpm_name or self.name.startswith(('python-', 'Python-')):\n return self.name_convertor.base_name(self.rpm_name or self.name)", "response": "Return the name of the pypi that should be changed."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nextracting data from archive.", "response": "def extract_data(self):\n \"\"\"Extracts data from archive.\n Returns:\n PackageData object containing the extracted data.\n \"\"\"\n data = PackageData(\n local_file=self.local_file,\n name=self.name,\n pkg_name=self.rpm_name or self.name_convertor.rpm_name(\n self.name, pkg_name=True),\n version=self.version,\n srcname=self.srcname)\n\n with self.archive:\n data.set_from(self.data_from_archive)\n\n # for example nose has attribute `packages` but instead of name\n # listing the pacakges is using function to find them, that makes\n # data.packages an empty set if virtualenv is disabled\n if self.venv_extraction_disabled and getattr(data, \"packages\") == []:\n data.packages = [data.name]\n\n return data"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning all metadata extracted from the archive.", "response": "def data_from_archive(self):\n \"\"\"Returns all metadata extractable from the 
archive.\n Returns:\n dictionary containing metadata extracted from the archive\n \"\"\"\n archive_data = {}\n\n archive_data['runtime_deps'] = self.runtime_deps\n archive_data['build_deps'] = [\n ['BuildRequires', 'python2-devel']] + self.build_deps\n\n archive_data['py_modules'] = self.py_modules\n archive_data['scripts'] = self.scripts\n\n archive_data['home_page'] = self.home_page\n archive_data['description'] = self.description\n archive_data['summary'] = self.summary\n\n archive_data['license'] = self.license\n archive_data['has_pth'] = self.has_pth\n archive_data['has_extension'] = self.has_extension\n archive_data['has_test_suite'] = self.has_test_suite\n\n archive_data['python_versions'] = self.versions_from_archive\n\n (archive_data['doc_files'],\n archive_data['doc_license']) = self.separate_license_files(\n self.doc_files)\n\n archive_data['dirname'] = self.archive.top_directory\n\n return archive_data"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef runtime_deps(self): # install_requires\n install_requires = self.metadata['install_requires']\n if self.metadata[\n 'entry_points'] and 'setuptools' not in install_requires:\n install_requires.append('setuptools') # entrypoints\n\n return sorted(self.name_convert_deps_list(deps_from_pyp_format(\n install_requires, runtime=True)))", "response": "Returns a list of runtime dependencies of the package specified in\n setup. 
py."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn list of doc files that should be used for %doc in specfile.", "response": "def doc_files(self):\n \"\"\"Returns list of doc files that should be used for %doc in specfile.\n Returns:\n List of doc files from the archive - only basenames, not full\n paths.\n \"\"\"\n doc_files = []\n for doc_file_re in settings.DOC_FILES_RE:\n doc_files.extend(\n self.archive.get_files_re(doc_file_re, ignorecase=True))\n return ['/'.join(x.split('/')[1:]) for x in doc_files]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef sphinx_dir(self):\n # search for sphinx dir doc/ or docs/ under the first directory in\n # archive (e.g. spam-1.0.0/doc)\n candidate_dirs = self.archive.get_directories_re(\n settings.SPHINX_DIR_RE, full_path=True)\n\n # search for conf.py in the dirs (TODO: what if more are found?)\n for directory in candidate_dirs:\n contains_conf_py = self.archive.get_files_re(\n r'{0}/conf.py$'.format(re.escape(directory)), full_path=True)\n in_tests = 'tests' in directory.split(os.sep)\n if contains_conf_py and not in_tests:\n return directory", "response": "Returns full path to sphinx documentation dir inside the archive or None if there is no such."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef data_from_archive(self):\n\n archive_data = super(SetupPyMetadataExtractor, self).data_from_archive\n\n archive_data['has_packages'] = self.has_packages\n archive_data['packages'] = self.packages\n archive_data['has_bundled_egg_info'] = self.has_bundled_egg_info\n sphinx_dir = self.sphinx_dir\n if sphinx_dir:\n archive_data['sphinx_dir'] = \"/\".join(sphinx_dir.split(\"/\")[1:])\n archive_data['build_deps'].append(\n ['BuildRequires', self.name_convertor.rpm_name(\n \"sphinx\", self.base_python_version)])\n\n return archive_data", 
"response": "Appends setup. py specific metadata to archive_data."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nextracts requires of given types from metadata file filter windows specific requires.", "response": "def get_requires(self, requires_types):\n \"\"\"Extracts requires of given types from metadata file, filter windows\n specific requires.\n \"\"\"\n if not isinstance(requires_types, list):\n requires_types = list(requires_types)\n extracted_requires = []\n for requires_name in requires_types:\n for requires in self.json_metadata.get(requires_name, []):\n if 'win' in requires.get('environment', {}):\n continue\n extracted_requires.extend(requires['requires'])\n return extracted_requires"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nretrieve list of package URLs using PyPI s XML - RPC.", "response": "def get_url(client, name, version, wheel=False, hashed_format=False):\n \"\"\"Retrieves list of package URLs using PyPI's XML-RPC. 
Chooses URL\n of prefered archive and md5_digest.\n \"\"\"\n try:\n release_urls = client.release_urls(name, version)\n release_data = client.release_data(name, version)\n except BaseException: # some kind of error with client\n logger.debug('Client: {0} Name: {1} Version: {2}.'.format(\n client, name, version))\n raise exceptions.MissingUrlException(\n \"Some kind of error while communicating with client: {0}.\".format(\n client), exc_info=True)\n\n url = ''\n md5_digest = None\n\n if not wheel:\n # Prefered archive is tar.gz\n if len(release_urls):\n zip_url = zip_md5 = ''\n for release_url in release_urls:\n if release_url['url'].endswith(\"tar.gz\"):\n url = release_url['url']\n md5_digest = release_url['md5_digest']\n if release_url['url'].endswith(\".zip\"):\n zip_url = release_url['url']\n zip_md5 = release_url['md5_digest']\n if url == '':\n url = zip_url or release_urls[0]['url']\n md5_digest = zip_md5 or release_urls[0]['md5_digest']\n elif release_data:\n url = release_data['download_url']\n else:\n # Only wheel is acceptable\n for release_url in release_urls:\n if release_url['url'].endswith(\"none-any.whl\"):\n url = release_url['url']\n md5_digest = release_url['md5_digest']\n break\n if not url:\n raise exceptions.MissingUrlException(\n \"Url of source archive not found.\")\n\n if url == 'UNKNOWN':\n raise exceptions.MissingUrlException(\n \"{0} package has no sources on PyPI, Please ask the maintainer \"\n \"to upload sources.\".format(release_data['name']))\n\n if not hashed_format:\n url = (\"https://files.pythonhosted.org/packages/source\"\n \"/{0[0]}/{0}/{1}\").format(name, url.split(\"/\")[-1])\n\n return (url, md5_digest)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get(self, wheel=False):\n try:\n url = get_url(self.client, self.name, self.version,\n wheel, hashed_format=True)[0]\n except exceptions.MissingUrlException as e:\n raise SystemExit(e)\n if wheel:\n self.temp_dir = 
tempfile.mkdtemp()\n save_dir = self.temp_dir\n else:\n save_dir = self.save_dir\n\n save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])\n request.urlretrieve(url, save_file)\n logger.info('Downloaded package from PyPI: {0}.'.format(save_file))\n return save_file", "response": "Downloads the package from PyPI."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncopy file from local filesystem to self. save_dir.", "response": "def get(self):\n \"\"\"Copies file from local filesystem to self.save_dir.\n Returns:\n Full path of the copied file.\n Raises:\n EnvironmentError if the file can't be found or the save_dir\n is not writable.\n \"\"\"\n if self.local_file.endswith('.whl'):\n self.temp_dir = tempfile.mkdtemp()\n save_dir = self.temp_dir\n else:\n save_dir = self.save_dir\n\n save_file = '{0}/{1}'.format(save_dir, os.path.basename(\n self.local_file))\n if not os.path.exists(save_file) or not os.path.samefile(\n self.local_file, save_file):\n shutil.copy2(self.local_file, save_file)\n logger.info('Local file: {0} copyed to {1}.'.format(\n self.local_file, save_file))\n\n return save_file"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns the filename stripped of the suffix.", "response": "def _stripped_name_version(self):\n \"\"\"Returns filename stripped of the suffix.\n Returns:\n Filename stripped of the suffix (extension).\n \"\"\"\n # we don't use splitext, because on \"a.tar.gz\" it returns (\"a.tar\",\n # \"gz\")\n filename = os.path.basename(self.local_file)\n for archive_suffix in settings.ARCHIVE_SUFFIXES:\n if filename.endswith(archive_suffix):\n return filename.rstrip('{0}'.format(archive_suffix))\n # if for cycle is exhausted it means no suffix was found\n else:\n raise exceptions.UnknownArchiveFormatException(\n 'Unkown archive format of file {0}.'.format(filename))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the 
following Python 3 function\ndef fill(self, path):\n '''\n Scans content of directories\n '''\n self.bindir = set(os.listdir(path + 'bin/'))\n self.lib_sitepackages = set(os.listdir(glob.glob(\n path + 'lib/python?.?/site-packages/')[0]))", "response": "Fills the internal state with the contents of the directories in the given path."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ninstalling package given as first argument to virtualenv without any dependencies.", "response": "def install_package_to_venv(self):\n '''\n Installs package given as first argument to virtualenv without\n dependencies\n '''\n try:\n self.env.install(self.name, force=True, options=[\"--no-deps\"])\n except (ve.PackageInstallationException,\n ve.VirtualenvReadonlyException):\n raise VirtualenvFailException(\n 'Failed to install package to virtualenv')\n self.dirs_after_install.fill(self.temp_dir + '/venv/')"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting the final versions of site_packages and scripts using DirsContent sub method and filters", "response": "def get_dirs_differance(self):\n '''\n Makes final versions of site_packages and scripts using DirsContent\n sub method and filters\n '''\n try:\n diff = self.dirs_after_install - self.dirs_before_install\n except ValueError:\n raise VirtualenvFailException(\n \"Some of the DirsContent attributes is uninicialized\")\n self.data['has_pth'] = \\\n any([x for x in diff.lib_sitepackages if x.endswith('.pth')])\n\n site_packages = site_packages_filter(diff.lib_sitepackages)\n self.data['packages'] = sorted(\n [p for p in site_packages if not p.endswith(MODULE_SUFFIXES)])\n self.data['py_modules'] = sorted(set(\n [os.path.splitext(m)[0] for m in site_packages - set(\n self.data['packages'])]))\n self.data['scripts'] = scripts_filter(sorted(diff.bindir))\n logger.debug('Data from files differance in virtualenv:')\n logger.debug(pprint.pformat(self.data))"} {"SOURCE": "codesearchnet", 
"instruction": "Can you implement a function in Python 3 that\nconverts a PyPI package to RPM specfile or SRPM.", "response": "def main(package, v, d, s, r, proxy, srpm, p, b, o, t, venv, autonc, sclize,\n **scl_kwargs):\n \"\"\"Convert PyPI package to RPM specfile or SRPM.\n\n \\b\n \\b\\bArguments:\n PACKAGE Provide PyPI name of the package or path to compressed\n source file.\n \"\"\"\n register_file_log_handler('/tmp/pyp2rpm-{0}.log'.format(getpass.getuser()))\n\n if srpm or s:\n register_console_log_handler()\n\n distro = o\n if t and os.path.splitext(t)[0] in settings.KNOWN_DISTROS:\n distro = t\n elif t and not (b or p):\n raise click.UsageError(\"Default python versions for template {0} are \"\n \"missing in settings, add them or use flags \"\n \"-b/-p to set python versions.\".format(t))\n\n logger = logging.getLogger(__name__)\n\n logger.info('Pyp2rpm initialized.')\n\n convertor = Convertor(package=package,\n version=v,\n save_dir=d,\n template=t or settings.DEFAULT_TEMPLATE,\n distro=distro,\n base_python_version=b,\n python_versions=p,\n rpm_name=r,\n proxy=proxy,\n venv=venv,\n autonc=autonc)\n\n logger.debug(\n 'Convertor: {0} created. 
Trying to convert.'.format(convertor))\n converted = convertor.convert()\n logger.debug('Convertor: {0} succesfully converted.'.format(convertor))\n\n if sclize:\n converted = convert_to_scl(converted, scl_kwargs)\n\n if srpm or s:\n if r:\n spec_name = r + '.spec'\n else:\n prefix = 'python-' if not convertor.name.startswith(\n 'python-') else ''\n spec_name = prefix + convertor.name + '.spec'\n logger.info('Using name: {0} for specfile.'.format(spec_name))\n if d == settings.DEFAULT_PKG_SAVE_PATH:\n # default save_path is rpmbuild tree so we want to save spec\n # in rpmbuild/SPECS/\n spec_path = d + '/SPECS/' + spec_name\n else:\n # if user provide save_path then save spec in provided path\n spec_path = d + '/' + spec_name\n spec_dir = os.path.dirname(spec_path)\n if not os.path.exists(spec_dir):\n os.makedirs(spec_dir)\n logger.debug('Opening specfile: {0}.'.format(spec_path))\n\n if not utils.PY3:\n converted = converted.encode('utf-8')\n with open(spec_path, 'w') as f:\n f.write(converted)\n logger.info('Specfile saved at: {0}.'.format(spec_path))\n\n if srpm:\n msg = utils.build_srpm(spec_path, d)\n logger.info(msg)\n\n else:\n logger.debug('Printing specfile to stdout.')\n if utils.PY3:\n print(converted)\n else:\n print(converted.encode('utf-8'))\n logger.debug('Specfile printed.')\n logger.info(\"That's all folks!\")"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef convert_to_scl(spec, scl_options):\n scl_options['skip_functions'] = scl_options['skip_functions'].split(',')\n scl_options['meta_spec'] = None\n convertor = SclConvertor(options=scl_options)\n return str(convertor.convert(spec))", "response": "Convert a spec into SCL - style spec file using spec2scl."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef format_options(self, ctx, formatter):\n super(Pyp2rpmCommand, self).format_options(ctx, formatter)\n scl_opts = 
[]\n for param in self.get_params(ctx):\n if isinstance(param, SclizeOption):\n scl_opts.append(param.get_scl_help_record(ctx))\n\n if scl_opts:\n with formatter.section('SCL related options'):\n formatter.write_dl(scl_opts)", "response": "Writes the SCL related options into the formatter as a separate separate\n group."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef handle_parse_result(self, ctx, opts, args):\n if 'sclize' in opts and not SclConvertor:\n raise click.UsageError(\"Please install spec2scl package to \"\n \"perform SCL-style conversion\")\n if self.name in opts and 'sclize' not in opts:\n raise click.UsageError(\n \"`--{}` can only be used with --sclize option\".format(\n self.name))\n\n return super(SclizeOption, self).handle_parse_result(ctx, opts, args)", "response": "Validate SCL related options before parsing."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nchecks if given value is a list tries to convert it if it is not.", "response": "def to_list(var):\n \"\"\"Checks if given value is a list, tries to convert, if it is not.\"\"\"\n if var is None:\n return []\n if isinstance(var, str):\n var = var.split('\\n')\n elif not isinstance(var, list):\n try:\n var = list(var)\n except TypeError:\n raise ValueError(\"{} cannot be converted to the list.\".format(var))\n return var"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nsend extracted metadata in json format to stdout if stdout option is specified assigns metadata dictionary to class_metadata variable otherwise.", "response": "def run(self):\n \"\"\"Sends extracted metadata in json format to stdout if stdout\n option is specified, assigns metadata dictionary to class_metadata\n variable otherwise.\n \"\"\"\n if self.stdout:\n sys.stdout.write(\"extracted json data:\\n\" + json.dumps(\n self.metadata, default=to_str) + \"\\n\")\n else:\n 
extract_dist.class_metadata = self.metadata"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nconverting a dependency retrieved by pkg_resources. Requirement. parse to RPM format.", "response": "def dependency_to_rpm(dep, runtime):\n \"\"\"Converts a dependency got by pkg_resources.Requirement.parse()\n to RPM format.\n Args:\n dep - a dependency retrieved by pkg_resources.Requirement.parse()\n runtime - whether the returned dependency should be runtime (True)\n or build time (False)\n Returns:\n List of semi-SPECFILE dependencies (package names are not properly\n converted yet).\n For example: [['Requires', 'jinja2'],\n ['Conflicts', 'jinja2', '=', '2.0.1']]\n \"\"\"\n logger.debug('Dependencies provided: {0} runtime: {1}.'.format(\n dep, runtime))\n converted = []\n if not len(dep.specs):\n converted.append(['Requires', dep.project_name])\n else:\n for ver_spec in dep.specs:\n if ver_spec[0] == '!=':\n converted.append(\n ['Conflicts', dep.project_name, '=', ver_spec[1]])\n elif ver_spec[0] == '==':\n converted.append(\n ['Requires', dep.project_name, '=', ver_spec[1]])\n else:\n converted.append(\n ['Requires', dep.project_name, ver_spec[0], ver_spec[1]])\n\n if not runtime:\n for conv in converted:\n conv[0] = \"Build\" + conv[0]\n logger.debug('Converted dependencies: {0}.'.format(converted))\n\n return converted"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef deps_from_pyp_format(requires, runtime=True):\n parsed = []\n logger.debug(\"Dependencies from setup.py: {0} runtime: {1}.\".format(\n requires, runtime))\n\n for req in requires:\n try:\n parsed.append(Requirement.parse(req))\n except ValueError:\n logger.warn(\"Unparsable dependency {0}.\".format(req),\n exc_info=True)\n\n in_rpm_format = []\n for dep in parsed:\n in_rpm_format.extend(dependency_to_rpm(dep, runtime))\n logger.debug(\"Dependencies from setup.py in rpm format: {0}.\".format(\n 
in_rpm_format))\n\n return in_rpm_format", "response": "Parses the list of dependencies extracted from setup. py into a list of semi -SPECFILE dependencies."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nparse the dependencies returned by pydit. json and returns a list of dependecies.", "response": "def deps_from_pydit_json(requires, runtime=True):\n \"\"\"Parses dependencies returned by pydist.json, since versions\n uses brackets we can't use pkg_resources to parse and we need a separate\n method\n Args:\n requires: list of dependencies as written in pydist.json of the package\n runtime: are the dependencies runtime (True) or build time (False)\n Returns:\n List of semi-SPECFILE dependecies (see dependency_to_rpm for format)\n \"\"\"\n parsed = []\n for req in requires:\n # req looks like 'some-name (>=X.Y,!=Y.X)' or 'someme-name' where\n # 'some-name' is the name of required package and '(>=X.Y,!=Y.X)'\n # are specs\n name, specs = None, None\n # len(reqs) == 1 if there are not specified versions, 2 otherwise\n reqs = req.split(' ')\n name = reqs[0]\n if len(reqs) == 2:\n specs = reqs[1]\n # try if there are more specs in spec part of the requires\n specs = specs.split(\",\")\n # strip brackets\n specs = [re.sub('[()]', '', spec) for spec in specs]\n # this will divide (>=0.1.2) to ['>=', '0', '.1.2']\n # or (0.1.2) into ['', '0', '.1.2']\n specs = [re.split('([0-9])', spec, 1) for spec in specs]\n # we have separated specs based on number as delimiter\n # so we need to join it back to rest of version number\n # e.g ['>=', '0', '.1.2'] to ['>=', '0.1.2']\n for spec in specs:\n spec[1:3] = [''.join(spec[1:3])]\n if specs:\n for spec in specs:\n if '!' 
in spec[0]:\n parsed.append(['Conflicts', name, '=', spec[1]])\n elif specs[0] == '==':\n parsed.append(['Requires', name, '=', spec[1]])\n else:\n parsed.append(['Requires', name, spec[0], spec[1]])\n else:\n parsed.append(['Requires', name])\n\n if not runtime:\n for pars in parsed:\n pars[0] = 'Build' + pars[0]\n\n return parsed"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_changelog_date_packager(self):\n try:\n packager = subprocess.Popen(\n 'rpmdev-packager', stdout=subprocess.PIPE).communicate(\n )[0].strip()\n except OSError:\n # Hi John Doe, you should install rpmdevtools\n packager = \"John Doe \"\n logger.warn(\"Package rpmdevtools is missing, using default \"\n \"name: {0}.\".format(packager))\n with utils.c_time_locale():\n date_str = time.strftime('%a %b %d %Y', time.gmtime())\n encoding = locale.getpreferredencoding()\n return u'{0} {1}'.format(date_str, packager.decode(encoding))", "response": "Returns part of the changelog entry containing date and packager."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nexecuting the code of the specified module.", "response": "def run(self):\n \"\"\"Executes the code of the specified module.\"\"\"\n with utils.ChangeDir(self.dirname):\n sys.path.insert(0, self.dirname)\n sys.argv[1:] = self.args\n runpy.run_module(self.not_suffixed(self.filename),\n run_name='__main__',\n alter_sys=True)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nexecuting the code of the specified module. Deserializes captured json data.", "response": "def run(self, interpreter):\n \"\"\"Executes the code of the specified module. 
Deserializes captured\n json data.\n \"\"\"\n with utils.ChangeDir(self.dirname):\n command_list = ['PYTHONPATH=' + main_dir, interpreter,\n self.filename] + list(self.args)\n try:\n proc = Popen(' '.join(command_list), stdout=PIPE, stderr=PIPE,\n shell=True)\n stream_data = proc.communicate()\n except Exception as e:\n logger.error(\n \"Error {0} while executing extract_dist command.\".format(e))\n raise ExtractionError\n stream_data = [utils.console_to_str(s) for s in stream_data]\n if proc.returncode:\n logger.error(\n \"Subprocess failed, stdout: {0[0]}, stderr: {0[1]}\".format(\n stream_data))\n self._result = json.loads(stream_data[0].split(\n \"extracted json data:\\n\")[-1].split(\"\\n\")[0])"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the class that can read this archive based on archive suffix.", "response": "def extractor_cls(self):\n \"\"\"Returns the class that can read this archive based on archive suffix.\n Returns:\n Class that can read this archive or None if no such exists.\n \"\"\"\n file_cls = None\n\n # only catches \".gz\", even from \".tar.gz\"\n if self.is_tar:\n file_cls = TarFile\n elif self.is_zip:\n file_cls = ZipFile\n else:\n logger.info(\"Couldn't recognize archive suffix: {0}.\".format(\n self.suffix))\n\n return file_cls"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_content_of_file(self, name, full_path=False):\n if self.handle:\n for member in self.handle.getmembers():\n if (full_path and member.name == name) or (\n not full_path and os.path.basename(\n member.name) == name):\n extracted = self.handle.extractfile(member)\n return extracted.read().decode(\n locale.getpreferredencoding())\n\n return None", "response": "Returns the content of a file in the archive."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef extract_file(self, name, 
full_path=False, directory=\".\"):\n if self.handle:\n for member in self.handle.getmembers():\n if (full_path and member.name == name or\n not full_path and os.path.basename(\n member.name) == name):\n # TODO handle KeyError exception\n self.handle.extract(member, path=directory)", "response": "Extract a file from the archive to the specified working directory."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef extract_all(self, directory=\".\", members=None):\n if self.handle:\n self.handle.extractall(path=directory, members=members)", "response": "Extract all members from the archive to the specified working directory."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef has_file_with_suffix(self, suffixes):\n if not isinstance(suffixes, list):\n suffixes = [suffixes]\n\n if self.handle:\n for member in self.handle.getmembers():\n if os.path.splitext(member.name)[1] in suffixes:\n return True\n else:\n # hack for .zip files, where directories are not returned\n # themselves, therefore we can't find e.g. 
.egg-info\n for suffix in suffixes:\n if '{0}/'.format(suffix) in member.name:\n return True\n\n return False", "response": "Finds out if there is a file with one of the given suffixes in the archive."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nfinds all files that match file_re and returns their list.", "response": "def get_files_re(self, file_re, full_path=False, ignorecase=False):\n \"\"\"Finds all files that match file_re and returns their list.\n Doesn't return directories, only files.\n\n Args:\n file_re: raw string to match files against (gets compiled into re)\n full_path: whether to match against full path inside the archive\n or just the filenames\n ignorecase: whether to ignore case when using the given re\n Returns:\n List of full paths of files inside the archive that match the given\n file_re.\n \"\"\"\n try:\n if ignorecase:\n compiled_re = re.compile(file_re, re.I)\n else:\n compiled_re = re.compile(file_re)\n except sre_constants.error:\n logger.error(\"Failed to compile regex: {}.\".format(file_re))\n return []\n\n found = []\n\n if self.handle:\n for member in self.handle.getmembers():\n if isinstance(member, TarInfo) and member.isdir():\n pass # for TarInfo files, filter out directories\n elif (full_path and compiled_re.search(member.name)) or (\n not full_path and compiled_re.search(os.path.basename(\n member.name))):\n found.append(member.name)\n\n return found"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns the name of the archive topmost directory.", "response": "def top_directory(self):\n \"\"\"Return the name of the archive topmost directory.\"\"\"\n if self.handle:\n return os.path.commonprefix(self.handle.getnames()).rstrip('/')"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef rpm_versioned_name(cls, name, version, default_number=False):\n regexp = re.compile(r'^python(\\d*|)-(.*)')\n 
auto_provides_regexp = re.compile(r'^python(\\d*|)dist(.*)')\n\n if (not version or version == cls.get_default_py_version() and\n not default_number):\n found = regexp.search(name)\n # second check is to avoid renaming of python2-devel to\n # python-devel\n if found and found.group(2) != 'devel':\n if 'epel' not in cls.template:\n return 'python-{0}'.format(regexp.search(name).group(2))\n return name\n\n versioned_name = name\n if version:\n\n if regexp.search(name):\n versioned_name = re.sub(r'^python(\\d*|)-', 'python{0}-'.format(\n version), name)\n elif auto_provides_regexp.search(name):\n versioned_name = re.sub(\n r'^python(\\d*|)dist', 'python{0}dist'.format(\n version), name)\n\n else:\n versioned_name = 'python{0}-{1}'.format(version, name)\n if ('epel' in cls.template and version !=\n cls.get_default_py_version()):\n versioned_name = versioned_name.replace('{0}'.format(\n version), '%{{python{0}_pkgversion}}'.format(version))\n return versioned_name", "response": "Returns the name of the current version of the given name."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nconvert a name of the package to a rpm name according to Packaging Guidelines.", "response": "def rpm_name(self, name, python_version=None, pkg_name=False):\n \"\"\"Returns name of the package converted to (possibly) correct package\n name according to Packaging Guidelines.\n Args:\n name: name to convert\n python_version: python version for which to retrieve the name of\n the package\n pkg_name: flag to perform conversion of rpm package name,\n present in this class just for API compatibility reason\n Returns:\n Converted name of the package, that should be in line with\n Fedora Packaging Guidelines. 
If for_python is not None,\n the returned name is in form python%(version)s-%(name)s\n \"\"\"\n logger.debug(\"Converting name: {0} to rpm name, version: {1}.\".format(\n name, python_version))\n rpmized_name = self.base_name(name)\n\n rpmized_name = 'python-{0}'.format(rpmized_name)\n\n if self.distro == 'mageia':\n rpmized_name = rpmized_name.lower()\n logger.debug('Rpmized name of {0}: {1}.'.format(name, rpmized_name))\n return NameConvertor.rpm_versioned_name(rpmized_name, python_version)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nremoving any python prefixes of suffixes from name if present.", "response": "def base_name(self, name):\n \"\"\"Removes any python prefixes of suffixes from name if present.\"\"\"\n base_name = name.replace('.', \"-\")\n # remove python prefix if present\n found_prefix = self.reg_start.search(name)\n if found_prefix:\n base_name = found_prefix.group(2)\n\n # remove -pythonXY like suffix if present\n found_end = self.reg_end.search(name.lower())\n if found_end:\n base_name = found_end.group(1)\n\n return base_name"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nmerge this object with another object.", "response": "def merge(self, other):\n \"\"\"Merges object with other NameVariants object, not set values\n of self.variants are replace by values from other object.\n \"\"\"\n if not isinstance(other, NameVariants):\n raise TypeError(\"NameVariants isinstance can be merge with\"\n \"other isinstance of the same class\")\n for key in self.variants:\n self.variants[key] = self.variants[key] or other.variants[key]\n\n return self"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef rpm_name(self, name, python_version=None, pkg_name=False):\n if pkg_name:\n return super(DandifiedNameConvertor, self).rpm_name(\n name, python_version)\n original_name = name\n converted = super(DandifiedNameConvertor, 
self).rpm_name(\n name, python_version)\n python_query = self.query.filter(name__substr=[\n 'python', 'py', original_name, canonical_form(original_name)])\n if converted in [pkg.name for pkg in python_query]:\n logger.debug(\"Converted name exists\")\n return converted\n\n logger.debug(\"Converted name not found, searches for correct form\")\n\n not_versioned_name = NameVariants(self.base_name(original_name), '')\n versioned_name = NameVariants(self.base_name(original_name),\n python_version)\n\n if self.base_name(original_name).startswith(\"py\"):\n nonpy_name = NameVariants(self.base_name(\n original_name)[2:], python_version)\n\n for pkg in python_query:\n versioned_name.find_match(pkg.name)\n not_versioned_name.find_match(pkg.name)\n if 'nonpy_name' in locals():\n nonpy_name.find_match(pkg.name)\n\n if 'nonpy_name' in locals():\n versioned_name = versioned_name.merge(nonpy_name)\n\n correct_form = versioned_name.merge(not_versioned_name).best_matching\n logger.debug(\"Most likely correct form of the name {0}.\".format(\n correct_form))\n return correct_form or converted", "response": "Checks if name converted using superclass rpm_name_method match name\nelib."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef merge_versions(self, data):\n if self.template == \"epel6.spec\":\n # if user requested version greater than 2, writes error message\n # and exits\n requested_versions = self.python_versions\n if self.base_python_version:\n requested_versions += [self.base_python_version]\n if any(int(ver[0]) > 2 for ver in requested_versions):\n sys.stderr.write(\n \"Invalid version, major number of python version for \"\n \"EPEL6 spec file must not be greater than 2.\\n\")\n sys.exit(1)\n # if version greater than 2 were extracted it is removed\n data.python_versions = [\n ver for ver in data.python_versions if not int(ver[0]) > 2]\n\n # Set python versions from default values in settings.\n 
base_version, additional_versions = (\n self.template_base_py_ver, self.template_py_vers)\n\n # Sync default values with extracted versions from PyPI classifiers.\n if data.python_versions:\n if base_version not in data.python_versions:\n base_version = data.python_versions[0]\n\n additional_versions = [\n v for v in additional_versions if v in data.python_versions]\n\n # Override default values with those set from command line if any.\n if self.base_python_version:\n base_version = self.base_python_version\n if self.python_versions:\n additional_versions = [\n v for v in self.python_versions if v != base_version]\n\n data.base_python_version = base_version\n data.python_versions = additional_versions", "response": "Merges python versions specified in command line options with extracted versions from PyPI classifiers."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a string representation of the RPM SPECFILE.", "response": "def convert(self):\n \"\"\"Returns RPM SPECFILE.\n Returns:\n rendered RPM SPECFILE.\n \"\"\"\n # move file into position\n try:\n local_file = self.getter.get()\n except (exceptions.NoSuchPackageException, OSError) as e:\n logger.error(\n \"Failed and exiting:\", exc_info=True)\n logger.info(\"Pyp2rpm failed. 
See log for more info.\")\n\n sys.exit(e)\n\n # save name and version from the file (rewrite if set previously)\n self.name, self.version = self.getter.get_name_version()\n\n self.local_file = local_file\n data = self.metadata_extractor.extract_data(self.client)\n logger.debug(\"Extracted metadata:\")\n logger.debug(pprint.pformat(data.data))\n self.merge_versions(data)\n\n jinja_env = jinja2.Environment(loader=jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(['/']),\n jinja2.PackageLoader('pyp2rpm', 'templates'), ]))\n\n for filter in filters.__all__:\n jinja_env.filters[filter.__name__] = filter\n\n try:\n jinja_template = jinja_env.get_template(\n os.path.abspath(self.template))\n except jinja2.exceptions.TemplateNotFound:\n # absolute path not found => search in default template dir\n logger.warn('Template: {0} was not found in {1} using default '\n 'template dir.'.format(\n self.template, os.path.abspath(self.template)))\n\n jinja_template = jinja_env.get_template(self.template)\n logger.info('Using default template: {0}.'.format(self.template))\n\n ret = jinja_template.render(data=data, name_convertor=name_convertor)\n return re.sub(r'[ \\t]+\\n', \"\\n\", ret)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef getter(self):\n if not hasattr(self, '_getter'):\n if not self.pypi:\n self._getter = package_getters.LocalFileGetter(\n self.package,\n self.save_dir)\n else:\n logger.debug(\n '{0} does not exist as local file trying PyPI.'.format(\n self.package))\n self._getter = package_getters.PypiDownloader(\n self.client,\n self.package,\n self.version,\n self.save_dir)\n\n return self._getter", "response": "Returns an instance of proper PackageGetter subclass. 
Always returns the same instance."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns an instance of proper MetadataExtractor subclass.", "response": "def metadata_extractor(self):\n \"\"\"Returns an instance of proper MetadataExtractor subclass.\n Always returns the same instance.\n\n Returns:\n The proper MetadataExtractor subclass according to local file\n suffix.\n \"\"\"\n if not hasattr(self, '_local_file'):\n raise AttributeError(\"local_file attribute must be set before \"\n \"calling metadata_extractor\")\n if not hasattr(self, '_metadata_extractor'):\n if self.local_file.endswith('.whl'):\n logger.info(\"Getting metadata from wheel using \"\n \"WheelMetadataExtractor.\")\n extractor_cls = metadata_extractors.WheelMetadataExtractor\n else:\n logger.info(\"Getting metadata from setup.py using \"\n \"SetupPyMetadataExtractor.\")\n extractor_cls = metadata_extractors.SetupPyMetadataExtractor\n\n base_python_version = (\n self.base_python_version or self.template_base_py_ver)\n\n self._metadata_extractor = extractor_cls(\n self.local_file,\n self.name,\n self.name_convertor,\n self.version,\n self.rpm_name,\n self.venv,\n base_python_version)\n\n return self._metadata_extractor"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the same instance of XMLRPC client for PyPI.", "response": "def client(self):\n \"\"\"XMLRPC client for PyPI. 
Always returns the same instance.\n\n If the package is provided as a path to compressed source file,\n PyPI will not be used and the client will not be instantiated.\n\n Returns:\n XMLRPC client for PyPI or None.\n \"\"\"\n if self.proxy:\n proxyhandler = urllib.ProxyHandler({\"http\": self.proxy})\n opener = urllib.build_opener(proxyhandler)\n urllib.install_opener(opener)\n transport = ProxyTransport()\n if not hasattr(self, '_client'):\n transport = None\n if self.pypi:\n if self.proxy:\n logger.info('Using provided proxy: {0}.'.format(\n self.proxy))\n self._client = xmlrpclib.ServerProxy(settings.PYPI_URL,\n transport=transport)\n self._client_set = True\n else:\n self._client = None\n\n return self._client"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef build_srpm(specfile, save_dir):\n logger.info('Starting rpmbuild to build: {0} SRPM.'.format(specfile))\n if save_dir != get_default_save_path():\n try:\n msg = subprocess.Popen(\n ['rpmbuild',\n '--define', '_sourcedir {0}'.format(save_dir),\n '--define', '_builddir {0}'.format(save_dir),\n '--define', '_srcrpmdir {0}'.format(save_dir),\n '--define', '_rpmdir {0}'.format(save_dir),\n '-bs', specfile], stdout=subprocess.PIPE).communicate(\n )[0].strip()\n except OSError:\n logger.error(\n \"Rpmbuild failed for specfile: {0} and save_dir: {1}\".format(\n specfile, save_dir), exc_info=True)\n msg = 'Rpmbuild failed. 
See log for more info.'\n return msg\n else:\n if not os.path.exists(save_dir):\n raise IOError(\"Specify folder to store a file (SAVE_DIR) \"\n \"or install rpmdevtools.\")\n try:\n msg = subprocess.Popen(\n ['rpmbuild',\n '--define', '_sourcedir {0}'.format(save_dir + '/SOURCES'),\n '--define', '_builddir {0}'.format(save_dir + '/BUILD'),\n '--define', '_srcrpmdir {0}'.format(save_dir + '/SRPMS'),\n '--define', '_rpmdir {0}'.format(save_dir + '/RPMS'),\n '-bs', specfile], stdout=subprocess.PIPE).communicate(\n )[0].strip()\n except OSError:\n logger.error(\"Rpmbuild failed for specfile: {0} and save_dir: \"\n \"{1}\".format(specfile, save_dir), exc_info=True)\n msg = 'Rpmbuild failed. See log for more info.'\n return msg", "response": "Builds a tree of srpms from a given specfile using rpmbuild."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncheck if executables already contain a - MAJOR. MINOR suffix.", "response": "def remove_major_minor_suffix(scripts):\n \"\"\"Checks if executables already contain a \"-MAJOR.MINOR\" suffix. 
\"\"\"\n minor_major_regex = re.compile(\"-\\d.?\\d?$\")\n return [x for x in scripts if not minor_major_regex.search(x)]"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nadd all runtime deps to build deps", "response": "def runtime_to_build(runtime_deps):\n \"\"\"Adds all runtime deps to build deps\"\"\"\n build_deps = copy.deepcopy(runtime_deps)\n for dep in build_deps:\n if len(dep) > 0:\n dep[0] = 'BuildRequires'\n return build_deps"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef unique_deps(deps):\n deps.sort()\n return list(k for k, _ in itertools.groupby(deps))", "response": "Remove duplicates from deps list of the lists"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting value of given macro using rpm tool", "response": "def rpm_eval(macro):\n \"\"\"Get value of given macro using rpm tool\"\"\"\n try:\n value = subprocess.Popen(\n ['rpm', '--eval', macro],\n stdout=subprocess.PIPE).communicate()[0].strip()\n except OSError:\n logger.error('Failed to get value of {0} rpm macro'.format(\n macro), exc_info=True)\n value = b''\n return console_to_str(value)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_default_save_path():\n macro = '%{_topdir}'\n if rpm:\n save_path = rpm.expandMacro(macro)\n else:\n save_path = rpm_eval(macro)\n if not save_path:\n logger.warn(\"rpm tools are missing, using default save path \"\n \"~/rpmbuild/.\")\n save_path = os.path.expanduser('~/rpmbuild')\n return save_path", "response": "Return default save path for the packages"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef check_and_get_data(input_list,**pars):\n empty_list = [] \n retrieve_list = [] # Actual files retrieved via astroquery and resident on disk\n candidate_list = [] # File names gathered from *_asn.fits file\n ipppssoot_list = [] # ipppssoot names 
used to avoid duplicate downloads \n total_input_list = [] # Output full filename list of data on disk\n\n # Loop over the input_list to determine if the item in the input_list is a full association file \n # (*_asn.fits), a full individual image file (aka singleton, *_flt.fits), or a root name specification \n # (association or singleton, ipppssoot).\n for input_item in input_list:\n print('Input item: ', input_item)\n indx = input_item.find('_')\n \n # Input with a suffix (_xxx.fits)\n if indx != -1:\n lc_input_item = input_item.lower()\n suffix = lc_input_item[indx+1:indx+4]\n print('file: ', lc_input_item)\n # For an association, need to open the table and read the image names as this could\n # be a custom association. The assumption is this file is on local disk when specified\n # in this manner (vs just the ipppssoot of the association).\n # This \"if\" block just collects the wanted full file names.\n if suffix == 'asn':\n try:\n asntab = Table.read(input_item, format='fits')\n except FileNotFoundError:\n log.error('File {} not found.'.format(input_item))\n return(empty_list)\n for row in asntab:\n if row['MEMTYPE'].startswith('PROD'):\n continue\n memname = row['MEMNAME'].lower().strip()\n # Need to check if the MEMNAME is a full filename or an ipppssoot\n if memname.find('_') != -1:\n candidate_list.append(memname)\n else:\n candidate_list.append(memname + '_flc.fits')\n elif suffix == 'flc' or suffix == 'flt':\n if lc_input_item not in candidate_list:\n candidate_list.append(lc_input_item)\n else:\n log.error('Inappropriate file suffix: {}. 
Looking for \"asn.fits\", \"flc.fits\", or \"flt.fits\".'.format(suffix))\n return(empty_list)\n\n # Input is an ipppssoot (association or singleton), nine characters by definition.\n # This \"else\" block actually downloads the data specified as ipppssoot.\n elif len(input_item) == 9:\n try:\n if input_item not in ipppssoot_list:\n # An ipppssoot of an individual file which is part of an association cannot be\n # retrieved from MAST\n retrieve_list = aqutils.retrieve_observation(input_item,**pars)\n\n # If the retrieved list is not empty, add filename(s) to the total_input_list.\n # Also, update the ipppssoot_list so we do not try to download the data again. Need\n # to do this since retrieve_list can be empty because (1) data cannot be acquired (error)\n # or (2) data is already on disk (ok).\n if retrieve_list:\n total_input_list += retrieve_list\n ipppssoot_list.append(input_item)\n else:\n log.error('File {} cannot be retrieved from MAST.'.format(input_item))\n return(empty_list)\n except Exception:\n exc_type, exc_value, exc_tb = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)\n\n # Only the retrieve_list files via astroquery have been put into the total_input_list thus far.\n # Now check candidate_list to detect or acquire the requested files from MAST via\n # astroquery.\n for file in candidate_list:\n # If the file is found on disk, add it to the total_input_list and continue\n if glob.glob(file):\n total_input_list.append(file)\n continue\n else:\n log.error('File {} cannot be found on the local disk.'.format(file))\n return(empty_list)\n\n log.info(\"TOTAL INPUT LIST: {}\".format(total_input_list))\n return(total_input_list)", "response": "Verify that all specified files are present. 
If not retrieve them from MAST."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef perform_align(input_list, **kwargs):\n filteredTable = Table()\n run_align(input_list, result=filteredTable, **kwargs)\n return filteredTable", "response": "Main function for aligning IPPSSOOTs."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nperform cross - matching and final fit using 2dHistogram matching", "response": "def match_relative_fit(imglist, reference_catalog):\n \"\"\"Perform cross-matching and final fit using 2dHistogram matching\n\n Parameters\n ----------\n imglist : list\n List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs\n\n reference_catalog : Table\n Astropy Table of reference sources for this field\n\n Returns\n --------\n imglist : list\n List of input image `~tweakwcs.tpwcs.FITSWCS` objects with metadata and source catalogs\n\n \"\"\"\n log.info(\"------------------- STEP 5b: (match_relative_fit) Cross matching and fitting ---------------------------\")\n # 0: Specify matching algorithm to use\n match = tweakwcs.TPMatch(searchrad=75, separation=0.1,\n tolerance=2, use2dhist=True)\n # match = tweakwcs.TPMatch(searchrad=250, separation=0.1,\n # tolerance=100, use2dhist=False)\n\n # Align images and correct WCS\n # NOTE: this invocation does not use an astrometric catalog. 
This call allows all the input images to be aligned in\n # a relative way using the first input image as the reference.\n # 1: Perform relative alignment\n tweakwcs.align_wcs(imglist, None, match=match, expand_refcat=True)\n\n # Set all the group_id values to be the same so the various images/chips will be aligned to the astrometric\n # reference catalog as an ensemble.\n # BEWARE: If additional iterations of solutions are to be done, the group_id values need to be restored.\n for image in imglist:\n image.meta[\"group_id\"] = 1234567\n # 2: Perform absolute alignment\n tweakwcs.align_wcs(imglist, reference_catalog, match=match)\n\n # 3: Interpret RMS values from tweakwcs\n interpret_fit_rms(imglist, reference_catalog)\n\n return imglist"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef match_default_fit(imglist, reference_catalog):\n log.info(\"-------------------- STEP 5b: (match_default_fit) Cross matching and fitting ---------------------------\")\n # Specify matching algorithm to use\n match = tweakwcs.TPMatch(searchrad=250, separation=0.1,\n tolerance=100, use2dhist=False)\n # Align images and correct WCS\n tweakwcs.align_wcs(imglist, reference_catalog, match=match, expand_refcat=False) #TODO: turn on 'expand_refcat' option in future development\n\n # Interpret RMS values from tweakwcs\n interpret_fit_rms(imglist, reference_catalog)\n\n return imglist", "response": "Perform cross - matching and final fit using 2dHistogram matching\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef determine_fit_quality(imglist,filteredTable, print_fit_parameters=True):\n tweakwcs_info_keys = OrderedDict(imglist[0].meta['fit_info']).keys()\n max_rms_val = 1e9\n num_xmatches = 0\n fitStatusDict={}\n xshifts=[]\n yshifts=[]\n overall_valid = True\n overall_comp = False\n for item in imglist:\n if item.meta['fit_info']['status'].startswith('FAILED') 
!= True:\n xshifts.append(item.meta['fit_info']['shift'][0])\n yshifts.append(item.meta['fit_info']['shift'][1])\n\n for item in imglist:\n image_name = item.meta['name']\n chip_num = item.meta['chip']\n\n # Build fitStatusDict entry\n dictKey = \"{},{}\".format(image_name, chip_num)\n fitStatusDict[dictKey] = {'valid': False,\n 'max_rms': max_rms_val,\n 'num_matches': num_xmatches,\n 'compromised': False,\n 'reason': \"\"} # Initialize dictionary entry for current image/chip\n #Handle fitting failures (no matches found)\n if item.meta['fit_info']['status'].startswith(\"FAILED\") == True:\n log.warning(\"No cross matches found in any catalog for {} - no processing done.\".format(image_name))\n continue\n fit_rms_val = item.meta['fit_info']['FIT_RMS']\n max_rms_val = item.meta['fit_info']['TOTAL_RMS']\n num_xmatches = item.meta['fit_info']['nmatches']\n fitStatusDict[dictKey]['max_rms'] = max_rms_val\n fitStatusDict[dictKey]['num_matches'] = num_xmatches\n\n if num_xmatches < MIN_CROSS_MATCHES:\n if catalogIndex < numCatalogs-1:\n log.warning(\"Not enough cross matches found between astrometric catalog and sources found in {}\".format(image_name))\n continue\n\n # Execute checks\n nmatchesCheck = False\n if num_xmatches > 4:\n nmatchesCheck = True\n\n radialOffsetCheck = False\n radialOffset = math.sqrt(float(item.meta['fit_info']['shift'][0])**2 +\n float(item.meta['fit_info']['shift'][0])**2)*item.wcs.pscale #radial offset in arssec\n if float(num_xmatches) * 0.36 > 0.8 + (radialOffset/10.0)**8:\n radialOffsetCheck = True\n\n largeRmsCheck = True\n if fit_rms_val > 150. 
or max_rms_val > 150.:\n largeRmsCheck = False\n\n # fitRmsCheck = False\n # if fit_rms_val < max_rms_val:\n # fitRmsCheck = True\n\n consistencyCheck = True\n rms_limit = max(item.meta['fit_info']['TOTAL_RMS'], 10.)\n if not math.sqrt(np.std(np.asarray(xshifts)) ** 2 + np.std(np.asarray(yshifts)) ** 2) <= (\n rms_limit / 1000.0) / (item.wcs.pscale):\n consistencyCheck = False\n\n # Decide if fit solutions are valid based on checks\n if consistencyCheck == False: # Failed consistency check\n fitStatusDict[dictKey]['valid'] = False\n fitStatusDict[dictKey]['compromised'] = False\n fitStatusDict[dictKey]['reason'] = \"Consistency violation!\"\n elif largeRmsCheck == False: # RMS value(s) too large\n fitStatusDict[dictKey]['valid'] = False\n fitStatusDict[dictKey]['compromised'] = False\n fitStatusDict[dictKey]['reason'] = \"RMS too large (>150 mas)!\"\n elif radialOffsetCheck == False: # Failed radial offset check\n fitStatusDict[dictKey]['valid'] = False\n fitStatusDict[dictKey]['compromised'] = False\n fitStatusDict[dictKey]['reason'] = \"Radial offset value too large!\"\n elif nmatchesCheck == False: # Too few matches\n fitStatusDict[dictKey]['valid'] = True\n fitStatusDict[dictKey]['compromised'] = True\n fitStatusDict[dictKey]['reason'] = \"Too few matches!\"\n else: # all checks passed. Valid solution.\n fitStatusDict[dictKey]['valid'] = True\n fitStatusDict[dictKey]['compromised'] = False\n fitStatusDict[dictKey]['reason'] = \"\"\n # for now, generate overall valid and compromised values. Basically, if any of the entries for \"valid\" is False,\n # treat the whole dataset as not valid. 
Same goes for compromised.\n if fitStatusDict[dictKey]['valid'] == False:\n overall_valid = False\n if fitStatusDict[dictKey]['compromised'] == True:\n overall_comp = True\n\n log.info('RESULTS FOR {} Chip {}: FIT_RMS = {} mas, TOTAL_RMS = {} mas, NUM = {}'.format(image_name, item.meta['chip'], fit_rms_val, max_rms_val, num_xmatches))\n # print fit params to screen\n if print_fit_parameters:\n log.info(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIT PARAMETERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n log.info(\"image: {}\".format(image_name))\n log.info(\"chip: {}\".format(item.meta['chip']))\n log.info(\"group_id: {}\".format(item.meta['group_id']))\n for tweakwcs_info_key in tweakwcs_info_keys:\n if not tweakwcs_info_key.startswith(\"matched\"):\n log.info(\"{} : {}\".format(tweakwcs_info_key,item.meta['fit_info'][tweakwcs_info_key]))\n log.info(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n log.info(\"nmatchesCheck: {} radialOffsetCheck: {} largeRmsCheck: {}, consistencyCheck: {}\".format(nmatchesCheck,radialOffsetCheck,largeRmsCheck,consistencyCheck))\n\n\n # determine which fit quality category this latest fit falls into\n if overall_valid == False:\n fitQual = 5\n log.info(\"FIT SOLUTION REJECTED\")\n filteredTable['status'][:] = 1\n for ctr in range(0, len(filteredTable)):\n filteredTable[ctr]['processMsg'] = fitStatusDict[filteredTable[ctr]['imageName'] + \",1\"][\"reason\"]\n else:\n for ctr in range(0, len(filteredTable)):\n filteredTable[ctr]['processMsg'] = \"\"\n if overall_comp == False and max_rms_val < 10.:\n log.info(\"Valid solution with RMS < 10 mas found!\")\n fitQual = 1\n elif overall_comp == True and max_rms_val < 10.:\n log.info(\"Valid but compromised solution with RMS < 10 mas found!\")\n fitQual = 2\n elif overall_comp == False and max_rms_val >= 10.:\n log.info(\"Valid solution with RMS >= 10 mas found!\")\n fitQual = 3\n else:\n log.info(\"Valid but compromised solution with RMS >= 10 mas found!\")\n 
fitQual = 4\n\n if print_fit_parameters:\n for item in imglist: log.info(fitStatusDict[\"{},{}\".format(item.meta['name'], item.meta['chip'])])\n\n if max_rms_val > MAX_FIT_RMS:\n log.info(\"Total fit RMS value = {} mas greater than the maximum threshold value {}.\".format(max_rms_val, MAX_FIT_RMS))\n if not overall_valid:\n log.info(\"The fit solution for some or all of the images is not valid.\")\n if max_rms_val > MAX_FIT_RMS or overall_valid == False:\n log.info(\"Try again with the next catalog\")\n else:\n log.info(\"Fit calculations successful.\")\n return max_rms_val, num_xmatches, fitQual, filteredTable, fitStatusDict", "response": "This function determines the quality of the fit to the data set."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef generate_astrometric_catalog(imglist, **pars):\n # generate catalog\n temp_pars = pars.copy()\n if pars['output'] == True:\n pars['output'] = 'ref_cat.ecsv'\n else:\n pars['output'] = None\n out_catalog = amutils.create_astrometric_catalog(imglist,**pars)\n pars = temp_pars.copy()\n #if the catalog has contents, write the catalog to ascii text file\n if len(out_catalog) > 0 and pars['output']:\n catalog_filename = \"refcatalog.cat\"\n out_catalog.write(catalog_filename, format=\"ascii.fast_commented_header\")\n log.info(\"Wrote reference catalog {}.\".format(catalog_filename))\n\n return(out_catalog)", "response": "Generates a new astrometric catalog of all sources from an existing astrometric catalog."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngenerates a dictionary of source catalogs keyed by image name.", "response": "def generate_source_catalogs(imglist, **pars):\n \"\"\"Generates a dictionary of source catalogs keyed by image name.\n\n Parameters\n ----------\n imglist : list\n List of one or more calibrated fits images that will be used for source detection.\n\n Returns\n -------\n 
sourcecatalogdict : dictionary\n a dictionary (keyed by image name) of two element dictionaries which in tern contain 1) a dictionary of the\n detector-specific processing parameters and 2) an astropy table of position and photometry information of all\n detected sources\n \"\"\"\n output = pars.get('output', False)\n sourcecatalogdict = {}\n for imgname in imglist:\n log.info(\"Image name: {}\".format(imgname))\n\n sourcecatalogdict[imgname] = {}\n\n # open image\n imghdu = fits.open(imgname)\n imgprimaryheader = imghdu[0].header\n instrument = imgprimaryheader['INSTRUME'].lower()\n detector = imgprimaryheader['DETECTOR'].lower()\n\n # get instrument/detector-specific image alignment parameters\n if instrument in detector_specific_params.keys():\n if detector in detector_specific_params[instrument].keys():\n detector_pars = detector_specific_params[instrument][detector]\n # to allow generate_source_catalog to get detector specific parameters\n detector_pars.update(pars)\n sourcecatalogdict[imgname][\"params\"] = detector_pars\n else:\n sys.exit(\"ERROR! Unrecognized detector '{}'. Exiting...\".format(detector))\n log.error(\"ERROR! Unrecognized detector '{}'. Exiting...\".format(detector))\n else:\n sys.exit(\"ERROR! Unrecognized instrument '{}'. Exiting...\".format(instrument))\n log.error(\"ERROR! Unrecognized instrument '{}'. Exiting...\".format(instrument))\n\n # Identify sources in image, convert coords from chip x, y form to reference WCS sky RA, Dec form.\n imgwcs = HSTWCS(imghdu, 1)\n fwhmpsf_pix = sourcecatalogdict[imgname][\"params\"]['fwhmpsf']/imgwcs.pscale #Convert fwhmpsf from arsec to pixels\n sourcecatalogdict[imgname][\"catalog_table\"] = amutils.generate_source_catalog(imghdu, fwhm=fwhmpsf_pix, **detector_pars)\n\n # write out coord lists to files for diagnostic purposes. 
Protip: To display the sources in these files in DS9,\n # set the \"Coordinate System\" option to \"Physical\" when loading the region file.\n imgroot = os.path.basename(imgname).split('_')[0]\n numSci = amutils.countExtn(imghdu)\n # Allow user to decide when and how to write out catalogs to files\n if output:\n for chip in range(1,numSci+1):\n chip_cat = sourcecatalogdict[imgname][\"catalog_table\"][chip]\n if chip_cat and len(chip_cat) > 0:\n regfilename = \"{}_sci{}_src.reg\".format(imgroot, chip)\n out_table = Table(chip_cat)\n out_table.write(regfilename, include_names=[\"xcentroid\", \"ycentroid\"], format=\"ascii.fast_commented_header\")\n log.info(\"Wrote region file {}\\n\".format(regfilename))\n imghdu.close()\n return(sourcecatalogdict)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update_image_wcs_info(tweakwcs_output):\n out_headerlet_dict = {}\n for item in tweakwcs_output:\n imageName = item.meta['filename']\n chipnum = item.meta['chip']\n if chipnum == 1:\n chipctr = 1\n hdulist = fits.open(imageName, mode='update')\n num_sci_ext = amutils.countExtn(hdulist)\n\n # generate wcs name for updated image header, headerlet\n if not hdulist['SCI',1].header['WCSNAME'] or hdulist['SCI',1].header['WCSNAME'] ==\"\": #Just in case header value 'wcsname' is empty.\n wcsName = \"FIT_{}\".format(item.meta['catalog_name'])\n else:\n wname = hdulist['sci', 1].header['wcsname']\n if \"-\" in wname:\n wcsName = '{}-FIT_{}'.format(wname[:wname.index('-')], item.meta['fit_info']['catalog'])\n else:\n wcsName = '{}-FIT_{}'.format(wname, item.meta['fit_info']['catalog'])\n\n # establish correct mapping to the science extensions\n sciExtDict = {}\n for sciExtCtr in range(1, num_sci_ext + 1):\n sciExtDict[\"{}\".format(sciExtCtr)] = fileutil.findExtname(hdulist,'sci',extver=sciExtCtr)\n\n # update header with new WCS info\n updatehdr.update_wcs(hdulist, sciExtDict[\"{}\".format(item.meta['chip'])], item.wcs, 
wcsname=wcsName,\n reusename=True, verbose=True)\n if chipctr == num_sci_ext:\n # Close updated flc.fits or flt.fits file\n #log.info(\"CLOSE {}\\n\".format(imageName)) # TODO: Remove before deployment\n hdulist.flush()\n hdulist.close()\n\n # Create headerlet\n out_headerlet = headerlet.create_headerlet(imageName, hdrname=wcsName, wcsname=wcsName)\n\n # Update headerlet\n update_headerlet_phdu(item, out_headerlet)\n\n # Write headerlet\n if imageName.endswith(\"flc.fits\"):\n headerlet_filename = imageName.replace(\"flc\", \"flt_hlet\")\n if imageName.endswith(\"flt.fits\"):\n headerlet_filename = imageName.replace(\"flt\", \"flt_hlet\")\n out_headerlet.writeto(headerlet_filename, clobber=True)\n log.info(\"Wrote headerlet file {}.\\n\\n\".format(headerlet_filename))\n out_headerlet_dict[imageName] = headerlet_filename\n\n # Attach headerlet as HDRLET extension\n headerlet.attach_headerlet(imageName, headerlet_filename)\n\n chipctr +=1\n return (out_headerlet_dict)", "response": "Update image header and WCS information for every valid image."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nupdates the primary header data unit keywords of a headerlet object in - place.", "response": "def update_headerlet_phdu(tweakwcs_item, headerlet):\n \"\"\"Update the primary header data unit keywords of a headerlet object in-place\n\n Parameters\n ==========\n tweakwc_item :\n Basically the output from tweakwcs which contains the cross match and fit\n information for every chip of every valid input image.\n\n headerlet :\n object containing WCS information\n \"\"\"\n\n # Get the data to be used as values for FITS keywords\n rms_ra = tweakwcs_item.meta['fit_info']['RMS_RA'].value\n rms_dec = tweakwcs_item.meta['fit_info']['RMS_DEC'].value\n fit_rms = tweakwcs_item.meta['fit_info']['FIT_RMS']\n nmatch = tweakwcs_item.meta['fit_info']['nmatches']\n catalog = tweakwcs_item.meta['fit_info']['catalog']\n\n x_shift = 
(tweakwcs_item.meta['fit_info']['shift'])[0]\n y_shift = (tweakwcs_item.meta['fit_info']['shift'])[1]\n rot = tweakwcs_item.meta['fit_info']['rot']\n scale = tweakwcs_item.meta['fit_info']['scale'][0]\n skew = tweakwcs_item.meta['fit_info']['skew']\n\n # Update the existing FITS keywords\n primary_header = headerlet[0].header\n primary_header['RMS_RA'] = rms_ra\n primary_header['RMS_DEC'] = rms_dec\n primary_header['NMATCH'] = nmatch\n primary_header['CATALOG'] = catalog\n\n # Create a new FITS keyword\n primary_header['FIT_RMS'] = (fit_rms, 'RMS (mas) of the 2D fit of the headerlet solution')\n\n # Create the set of HISTORY keywords\n primary_header['HISTORY'] = '~~~~~ FIT PARAMETERS ~~~~~'\n primary_header['HISTORY'] = '{:>15} : {:9.4f} \"/pixels'.format('platescale', tweakwcs_item.wcs.pscale)\n primary_header['HISTORY'] = '{:>15} : {:9.4f} pixels'.format('x_shift', x_shift)\n primary_header['HISTORY'] = '{:>15} : {:9.4f} pixels'.format('y_shift', y_shift)\n primary_header['HISTORY'] = '{:>15} : {:9.4f} degrees'.format('rotation', rot)\n primary_header['HISTORY'] = '{:>15} : {:9.4f}'.format('scale', scale)\n primary_header['HISTORY'] = '{:>15} : {:9.4f}'.format('skew', skew)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef interpret_fit_rms(tweakwcs_output, reference_catalog):\n # Start by collecting information by group_id\n group_ids = [info.meta['group_id'] for info in tweakwcs_output]\n # Compress the list to have only unique group_id values to avoid some unnecessary iterations\n group_ids = list(set(group_ids))\n group_dict = {'avg_RMS':None}\n obs_rms = []\n for group_id in group_ids:\n for item in tweakwcs_output:\n # When status = FAILED (fit failed) or REFERENCE (relative alignment done with first image\n # as the reference), skip to the beginning of the loop as there is no 'fit_info'.\n if item.meta['fit_info']['status'] != 'SUCCESS':\n continue\n # Make sure to store data for any 
particular group_id only once.\n if item.meta['group_id'] == group_id and \\\n group_id not in group_dict:\n group_dict[group_id] = {'ref_idx':None, 'FIT_RMS':None}\n log.info(\"fit_info: {}\".format(item.meta['fit_info']))\n tinfo = item.meta['fit_info']\n ref_idx = tinfo['matched_ref_idx']\n fitmask = tinfo['fitmask']\n group_dict[group_id]['ref_idx'] = ref_idx\n ref_RA = reference_catalog[ref_idx]['RA'][fitmask]\n ref_DEC = reference_catalog[ref_idx]['DEC'][fitmask]\n input_RA = tinfo['fit_RA']\n input_DEC = tinfo['fit_DEC']\n img_coords = SkyCoord(input_RA, input_DEC,\n unit='deg',frame='icrs')\n ref_coords = SkyCoord(ref_RA, ref_DEC, unit='deg',frame='icrs')\n dra, ddec = img_coords.spherical_offsets_to(ref_coords)\n ra_rms = np.std(dra.to(u.mas))\n dec_rms = np.std(ddec.to(u.mas))\n fit_rms = np.std(Angle(img_coords.separation(ref_coords), unit=u.mas)).value\n group_dict[group_id]['FIT_RMS'] = fit_rms\n group_dict[group_id]['RMS_RA'] = ra_rms\n group_dict[group_id]['RMS_DEC'] = dec_rms\n\n obs_rms.append(fit_rms)\n # Compute RMS for entire ASN/observation set\n total_rms = np.mean(obs_rms)\n #total_rms = np.sqrt(np.sum(np.array(obs_rms)**2))\n\n # Now, append computed results to tweakwcs_output\n for item in tweakwcs_output:\n group_id = item.meta['group_id']\n if group_id in group_dict:\n fit_rms = group_dict[group_id]['FIT_RMS']\n ra_rms = group_dict[group_id]['RMS_RA']\n dec_rms = group_dict[group_id]['RMS_DEC']\n else:\n fit_rms = None\n ra_rms = None\n dec_rms = None\n\n item.meta['fit_info']['FIT_RMS'] = fit_rms\n item.meta['fit_info']['TOTAL_RMS'] = total_rms\n item.meta['fit_info']['NUM_FITS'] = len(group_ids)\n item.meta['fit_info']['RMS_RA'] = ra_rms\n item.meta['fit_info']['RMS_DEC'] = dec_rms\n item.meta['fit_info']['catalog'] = reference_catalog.meta['catalog']", "response": "Interpret the FIT information to convert RMS to physical units."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef 
sky(input=None,outExt=None,configObj=None, group=None, editpars=False, **inputDict):\n\n\n if input is not None:\n inputDict['input']=input\n inputDict['output']=None\n inputDict['updatewcs']=False\n inputDict['group']=group\n else:\n print(\"Please supply an input image\", file=sys.stderr)\n raise ValueError\n\n configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))\n if configObj is None:\n return\n\n if not editpars:\n run(configObj,outExt=outExt)", "response": "This function performs a sky subtraction on a list of images and returns a copy of the original input image."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\napply sky value as read in from a user-supplied input file.", "response": "def _skyUserFromFile(imageObjList, skyFile, apply_sky=None):\n \"\"\"\n Apply sky value as read in from a user-supplied input file.\n\n \"\"\"\n skyKW=\"MDRIZSKY\" #header keyword that contains the sky that's been subtracted\n\n # create dict of fname=sky pairs\n skyvals = {}\n if apply_sky is None:\n skyapplied = False # flag whether sky has already been applied to images\n else:\n skyapplied = apply_sky\n\n for line in open(skyFile):\n if apply_sky is None and line[0] == '#' and 'applied' in line:\n if '=' in line: linesep = '='\n if ':' in line: linesep = ':'\n appliedstr = line.split(linesep)[1].strip()\n if appliedstr.lower() in ['yes','true','y','t']:\n skyapplied = True\n print('...Sky values already applied by user...')\n\n if not util.is_blank(line) and line[0] != '#':\n lspl = line.split()\n svals = []\n for lvals in lspl[1:]:\n svals.append(float(lvals))\n skyvals[lspl[0]] = svals\n\n # Apply user values to appropriate input images\n for imageSet in imageObjList:\n fname = imageSet._filename\n numchips=imageSet._numchips\n sciExt=imageSet.scienceExt\n if fname in skyvals:\n print(\" ...updating MDRIZSKY with user-supplied value.\")\n for chip in range(1,numchips+1,1):\n if len(skyvals[fname]) 
== 1:\n _skyValue = skyvals[fname][0]\n else:\n _skyValue = skyvals[fname][chip-1]\n\n chipext = '%s,%d'%(sciExt,chip)\n _updateKW(imageSet[chipext],fname,(sciExt,chip),skyKW,_skyValue)\n\n # Update internal record with subtracted sky value\n #\n # .computedSky: value to be applied by the\n # adrizzle/ablot steps.\n # .subtractedSky: value already (or will be by adrizzle/ablot)\n # subtracted from the image\n if skyapplied:\n imageSet[chipext].computedSky = None # used by adrizzle/ablot\n else:\n imageSet[chipext].computedSky = _skyValue\n imageSet[chipext].subtractedSky = _skyValue\n print(\"Setting \",skyKW,\"=\",_skyValue)\n else:\n print(\"*\"*40)\n print(\"*\")\n print(\"WARNING:\")\n print(\" .... NO user-supplied sky value found for \",fname)\n print(\" .... Setting sky to a value of 0.0! \")\n print(\"*\")\n print(\"*\"*40)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _skyUserFromHeaderKwd(imageSet,paramDict):\n _skyValue=0.0 #this will be the sky value computed for the exposure\n skyKW=\"MDRIZSKY\" #header keyword that contains the sky that's been subtracted\n\n #just making sure, tricky users and all, these are things that will be used\n #by the sky function so we want them defined at least\n try:\n assert imageSet._numchips > 0, \"invalid value for number of chips\"\n assert imageSet._filename != '', \"image object filename is empty!, doh!\"\n assert imageSet._rootname != '', \"image rootname is empty!, doh!\"\n assert imageSet.scienceExt !='', \"image object science extension is empty!\"\n\n except AssertionError:\n raise AssertionError\n\n numchips=imageSet._numchips\n sciExt=imageSet.scienceExt\n\n # User Subtraction Case, User has done own sky subtraction,\n # so use the image header value for subtractedsky value\n skyuser=paramDict[\"skyuser\"]\n\n if skyuser != '':\n print(\"User has computed their own sky values...\")\n\n if skyuser != skyKW:\n print(\" ...updating MDRIZSKY with supplied 
value.\")\n for chip in range(1,numchips+1,1):\n chipext = '%s,%d'%(sciExt,chip)\n if not imageSet[chipext].group_member:\n # skip extensions/chips that will not be processed\n continue\n try:\n _skyValue = imageSet[chipext].header[skyuser]\n except:\n print(\"**************************************************************\")\n print(\"*\")\n print(\"* Cannot find keyword \",skyuser,\" in \",imageSet._filename)\n print(\"*\")\n print(\"**************************************************************\\n\\n\\n\")\n raise KeyError\n\n _updateKW(imageSet[sciExt+','+str(chip)],\n imageSet._filename,(sciExt,chip),skyKW,_skyValue)\n\n # Update internal record with subtracted sky value\n imageSet[chipext].subtractedSky = _skyValue\n imageSet[chipext].computedSky = None\n print(\"Setting \",skyKW,\"=\",_skyValue)", "response": "This function is used to subtract the sky value from all the chips in the image file that imageSet represents and the image header keyword that contains the sky user in the imageSet."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nsubtracting the sky from all the chips in the imagefile that imageSet represents imageSet is a single imageObject reference paramDict should be the subset from an actual config object if saveFile=True, then images that have been sky subtracted are saved to a predetermined output name else, overwrite the input images with the sky-subtracted results the output from sky subtraction is a copy of the original input file where all the science data extensions have been sky subtracted", "response": "def _skySub(imageSet,paramDict,saveFile=False):\n \"\"\"\n subtract the sky from all the chips in the imagefile that imageSet represents\n\n imageSet is a single imageObject reference\n paramDict should be the subset from an actual config object\n if saveFile=True, then images that have been sky subtracted are saved to a predetermined output name\n else, overwrite the input images with the sky-subtracted 
results\n\n the output from sky subtraction is a copy of the original input file\n where all the science data extensions have been sky subtracted\n\n \"\"\"\n\n _skyValue=0.0 #this will be the sky value computed for the exposure\n skyKW=\"MDRIZSKY\" #header keyword that contains the sky that's been subtracted\n\n #just making sure, tricky users and all, these are things that will be used\n #by the sky function so we want them defined at least\n try:\n assert imageSet._numchips > 0, \"invalid value for number of chips\"\n assert imageSet._filename != '', \"image object filename is empty!, doh!\"\n assert imageSet._rootname != '', \"image rootname is empty!, doh!\"\n assert imageSet.scienceExt !='', \"image object science extension is empty!\"\n\n except AssertionError:\n raise AssertionError\n\n numchips=imageSet._numchips\n sciExt=imageSet.scienceExt\n\n # User Subtraction Case, User has done own sky subtraction,\n # so use the image header value for subtractedsky value\n skyuser=paramDict[\"skyuser\"]\n\n if skyuser != '':\n print(\"User has computed their own sky values...\")\n\n if skyuser != skyKW:\n print(\" ...updating MDRIZSKY with supplied value.\")\n for chip in range(1,numchips+1,1):\n try:\n chipext = '%s,%d'%(sciExt,chip)\n _skyValue = imageSet[chipext].header[skyuser]\n\n except:\n print(\"**************************************************************\")\n print(\"*\")\n print(\"* Cannot find keyword \",skyuser,\" in \",imageSet._filename)\n print(\"*\")\n print(\"**************************************************************\\n\\n\\n\")\n raise KeyError\n\n _updateKW(imageSet[sciExt+','+str(chip)],imageSet._filename,(sciExt,chip),skyKW,_skyValue)\n\n # Update internal record with subtracted sky value\n imageSet[chipext].subtractedSky = _skyValue\n imageSet[chipext].computedSky = None\n print(\"Setting \",skyKW,\"=\",_skyValue)\n\n else:\n # Compute our own sky values and record the values for use later.\n # The minimum sky value from all the science 
chips in the exposure\n # is used as the reference sky for each chip\n\n log.info(\"Computing minimum sky ...\")\n minSky=[] #store the sky for each chip\n minpscale = []\n\n for chip in range(1,numchips+1,1):\n myext=sciExt+\",\"+str(chip)\n\n #add the data back into the chip, leave it there til the end of this function\n imageSet[myext].data=imageSet.getData(myext)\n\n image=imageSet[myext]\n _skyValue= _computeSky(image, paramDict, memmap=False)\n #scale the sky value by the area on sky\n # account for the case where no IDCSCALE has been set, due to a\n # lack of IDCTAB or to 'coeffs=False'.\n pscale=imageSet[myext].wcs.idcscale\n if pscale is None:\n log.warning(\"No Distortion coefficients available...using \"\n \"default plate scale.\")\n pscale = imageSet[myext].wcs.pscale\n _scaledSky=_skyValue / (pscale**2)\n #_skyValue=_scaledSky\n minSky.append(_scaledSky)\n minpscale.append(pscale)\n\n _skyValue = min(minSky)\n\n _reportedSky = _skyValue*(minpscale[minSky.index(_skyValue)]**2)\n log.info(\"Minimum sky value for all chips %s\" % _reportedSky)\n\n #now subtract that value from all the chips in the exposure\n #and update the chips header keyword with the sub\n for chip in range(1,numchips+1,1):\n image=imageSet[sciExt,chip]\n myext = sciExt+\",\"+str(chip)\n # account for the case where no IDCSCALE has been set, due to a\n # lack of IDCTAB or to 'coeffs=False'.\n idcscale = image.wcs.idcscale\n if idcscale is None: idcscale = image.wcs.pscale\n _scaledSky=_skyValue * (idcscale**2)\n image.subtractedSky = _scaledSky\n image.computedSky = _scaledSky\n log.info(\"Using sky from chip %d: %f\\n\" % (chip,_scaledSky))\n ###_subtractSky(image,(_scaledSky))\n # Update the header so that the keyword in the image is\n #the sky value which should be subtracted from the image\n _updateKW(image,imageSet._filename,(sciExt,chip),skyKW,_scaledSky)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncomputes the sky value for the 
data array passed to the function", "response": "def _computeSky(image, skypars, memmap=False):\n\n \"\"\"\n Compute the sky value for the data array passed to the function\n image is a fits object which contains the data and the header\n for one image extension\n\n skypars is passed in as paramDict\n\n \"\"\"\n #this object contains the returned values from the image stats routine\n _tmp = imagestats.ImageStats(image.data,\n fields = skypars['skystat'],\n lower = skypars['skylower'],\n upper = skypars['skyupper'],\n nclip = skypars['skyclip'],\n lsig = skypars['skylsigma'],\n usig = skypars['skyusigma'],\n binwidth = skypars['skywidth']\n )\n\n _skyValue = _extractSkyValue(_tmp,skypars['skystat'].lower())\n log.info(\" Computed sky value/pixel for %s: %s \"%\n (image.rootname, _skyValue))\n\n del _tmp\n\n return _skyValue"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _subtractSky(image,skyValue,memmap=False):\n try:\n np.subtract(image.data,skyValue,image.data)\n\n except IOError:\n print(\"Unable to perform sky subtraction on data array\")\n raise IOError", "response": "Subtracts the given sky value from each image in the data array."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the header with the kw value", "response": "def _updateKW(image, filename, exten, skyKW, Value):\n \"\"\"update the header with the kw,value\"\"\"\n # Update the value in memory\n image.header[skyKW] = Value\n\n # Now update the value on disk\n if isinstance(exten,tuple):\n strexten = '[%s,%s]'%(exten[0],str(exten[1]))\n else:\n strexten = '[%s]'%(exten)\n log.info('Updating keyword %s in %s' % (skyKW, filename + strexten))\n fobj = fileutil.openImage(filename, mode='update', memmap=False)\n fobj[exten].header[skyKW] = (Value, 'Sky value computed by AstroDrizzle')\n fobj.close()"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef 
_addDefaultSkyKW(imageObjList):\n skyKW = \"MDRIZSKY\"\n Value = 0.0\n for imageSet in imageObjList:\n fname = imageSet._filename\n numchips=imageSet._numchips\n sciExt=imageSet.scienceExt\n fobj = fileutil.openImage(fname, mode='update', memmap=False)\n for chip in range(1,numchips+1,1):\n ext = (sciExt,chip)\n if not imageSet[ext].group_member:\n # skip over extensions not used in processing\n continue\n if skyKW not in fobj[ext].header:\n fobj[ext].header[skyKW] = (Value, 'Sky value computed by AstroDrizzle')\n log.info(\"MDRIZSKY keyword not found in the %s[%s,%d] header.\"%(\n fname,sciExt,chip))\n log.info(\" Adding MDRIZSKY to header with default value of 0.\")\n fobj.close()", "response": "Add MDRIZSKY keyword to all input images and SCI headers of all input images."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef help(file=None):\n helpstr = getHelpAsString(docstring=True, show_ver = True)\n if file is None:\n print(helpstr)\n else:\n if os.path.exists(file): os.remove(file)\n f = open(file, mode = 'w')\n f.write(helpstr)\n f.close()", "response": "Print out syntax help for running astrodrizzle\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef rd2xy(input,ra=None,dec=None,coordfile=None,colnames=None,\n precision=6,output=None,verbose=True):\n \"\"\" Primary interface to perform coordinate transformations from\n pixel to sky coordinates using STWCS and full distortion models\n read from the input image header.\n \"\"\"\n single_coord = False\n if coordfile is not None:\n if colnames in blank_list:\n colnames = ['c1','c2']\n elif isinstance(colnames,type('a')):\n colnames = colnames.split(',')\n # convert input file coordinates to lists of decimal degrees values\n xlist,ylist = tweakutils.readcols(coordfile,cols=colnames)\n else:\n if isinstance(ra,np.ndarray):\n ralist = ra.tolist()\n declist = dec.tolist()\n elif not 
isinstance(ra, list):\n ralist = [ra]\n declist = [dec]\n else:\n ralist = ra\n declist = dec\n xlist = [0]*len(ralist)\n ylist = [0]*len(ralist)\n if len(xlist) == 1:\n single_coord = True\n for i,(r,d) in enumerate(zip(ralist,declist)):\n # convert input value into decimal degrees value\n xval,yval = tweakutils.parse_skypos(r,d)\n xlist[i] = xval\n ylist[i] = yval\n\n # start by reading in WCS+distortion info for input image\n inwcs = wcsutil.HSTWCS(input)\n if inwcs.wcs.is_unity():\n print(\"####\\nNo valid WCS found in {}.\\n Results may be invalid.\\n####\\n\".format(input))\n\n # Now, convert pixel coordinates into sky coordinates\n try:\n outx,outy = inwcs.all_world2pix(xlist,ylist,1)\n except RuntimeError:\n outx,outy = inwcs.wcs_world2pix(xlist,ylist,1)\n\n # add formatting based on precision here...\n xstr = []\n ystr = []\n fmt = \"%.\"+repr(precision)+\"f\"\n for x,y in zip(outx,outy):\n xstr.append(fmt%x)\n ystr.append(fmt%y)\n\n if verbose or (not verbose and util.is_blank(output)):\n print ('# Coordinate transformations for ',input)\n print('# X Y RA Dec\\n')\n for x,y,r,d in zip(xstr,ystr,xlist,ylist):\n print(\"%s %s %s %s\"%(x,y,r,d))\n\n # Create output file, if specified\n if output:\n f = open(output,mode='w')\n f.write(\"# Coordinates converted from %s\\n\"%input)\n for x,y in zip(xstr,ystr):\n f.write('%s %s\\n'%(x,y))\n f.close()\n print('Wrote out results to: ',output)\n\n if single_coord:\n outx = outx[0]\n outy = outy[0]\n\n return outx, outy", "response": "This function converts pixel coordinates to sky coordinates using STWCS and full distortion models from the input image header."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate an astrometric catalog that covers the input images field - of - view.", "response": "def create_astrometric_catalog(inputs, **pars):\n \"\"\"Create an astrometric catalog that covers the inputs' field-of-view.\n\n Parameters\n ----------\n input : str, list\n Filenames of 
images to be aligned to astrometric catalog\n\n catalog : str, optional\n Name of catalog to extract astrometric positions for sources in the\n input images' field-of-view. Default: GAIADR2. Options available are\n documented on the catalog web page.\n\n output : str, optional\n Filename to give to the astrometric catalog read in from the master\n catalog web service. If None, no file will be written out.\n\n gaia_only : bool, optional\n Specify whether or not to only use sources from GAIA in output catalog\n Default: False\n\n existing_wcs : ~stwcs.wcsutils.HSTWCS`\n existing WCS object specified by the user\n\n Notes\n -----\n This function will point to astrometric catalog web service defined\n through the use of the ASTROMETRIC_CATALOG_URL environment variable.\n\n Returns\n -------\n ref_table : ~.astropy.table.Table`\n Astropy Table object of the catalog\n\n \"\"\"\n # interpret input parameters\n catalog = pars.get(\"catalog\", 'GAIADR2')\n output = pars.get(\"output\", 'ref_cat.ecsv')\n gaia_only = pars.get(\"gaia_only\", False)\n table_format = pars.get(\"table_format\", 'ascii.ecsv')\n existing_wcs = pars.get(\"existing_wcs\", None)\n\n inputs, _ = parseinput.parseinput(inputs)\n # start by creating a composite field-of-view for all inputs\n # This default output WCS will have the same plate-scale and orientation\n # as the first chip in the list, which for WFPC2 data means the PC.\n # Fortunately, for alignment, this doesn't matter since no resampling of\n # data will be performed\n if existing_wcs:\n outwcs = existing_wcs\n else:\n outwcs = build_reference_wcs(inputs)\n radius = compute_radius(outwcs)\n ra, dec = outwcs.wcs.crval\n\n # perform query for this field-of-view\n ref_dict = get_catalog(ra, dec, sr=radius, catalog=catalog)\n colnames = ('ra', 'dec', 'mag', 'objID', 'GaiaID')\n col_types = ('f8', 'f8', 'f4', 'U25', 'U25')\n ref_table = Table(names=colnames, dtype=col_types)\n\n # Add catalog name as meta data\n ref_table.meta['catalog'] = 
catalog\n ref_table.meta['gaia_only'] = gaia_only\n\n # rename coordinate columns to be consistent with tweakwcs\n ref_table.rename_column('ra', 'RA')\n ref_table.rename_column('dec', 'DEC')\n\n # extract just the columns we want...\n num_sources = 0\n for source in ref_dict:\n if 'GAIAsourceID' in source:\n g = source['GAIAsourceID']\n if gaia_only and g.strip() == '':\n continue\n else:\n g = \"-1\" # indicator for no source ID extracted\n r = float(source['ra'])\n d = float(source['dec'])\n m = -999.9 # float(source['mag'])\n o = source['objID']\n num_sources += 1\n ref_table.add_row((r, d, m, o, g))\n\n # Write out table to a file, if specified\n if output:\n ref_table.write(output, format=table_format)\n log.info(\"Created catalog '{}' with {} sources\".format(output, num_sources))\n\n return ref_table"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates the reference WCS based on all the inputs for a field", "response": "def build_reference_wcs(inputs, sciname='sci'):\n \"\"\"Create the reference WCS based on all the inputs for a field\"\"\"\n # start by creating a composite field-of-view for all inputs\n wcslist = []\n for img in inputs:\n nsci = countExtn(img)\n for num in range(nsci):\n extname = (sciname, num + 1)\n if sciname == 'sci':\n extwcs = wcsutil.HSTWCS(img, ext=extname)\n else:\n # Working with HDRLET as input and do the best we can...\n extwcs = read_hlet_wcs(img, ext=extname)\n\n wcslist.append(extwcs)\n\n # This default output WCS will have the same plate-scale and orientation\n # as the first chip in the list, which for WFPC2 data means the PC.\n # Fortunately, for alignment, this doesn't matter since no resampling of\n # data will be performed\n outwcs = utils.output_wcs(wcslist)\n\n return outwcs"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_catalog(ra, dec, sr=0.1, fmt='CSV', catalog='GSC241'):\n serviceType = 
'vo/CatalogSearch.aspx'\n spec_str = 'RA={}&DEC={}&SR={}&FORMAT={}&CAT={}&MINDET=5'\n headers = {'Content-Type': 'text/csv'}\n\n spec = spec_str.format(ra, dec, sr, fmt, catalog)\n serviceUrl = '{}/{}?{}'.format(SERVICELOCATION, serviceType, spec)\n rawcat = requests.get(serviceUrl, headers=headers)\n r_contents = rawcat.content.decode() # convert from bytes to a String\n rstr = r_contents.split('\\r\\n')\n # remove initial line describing the number of sources returned\n # CRITICAL to proper interpretation of CSV data\n del rstr[0]\n r_csv = csv.DictReader(rstr)\n\n return r_csv", "response": "Extract a catalog from VO web service."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncomputing the radius from the center to the furthest edge of the WCS.", "response": "def compute_radius(wcs):\n \"\"\"Compute the radius from the center to the furthest edge of the WCS.\"\"\"\n\n ra, dec = wcs.wcs.crval\n img_center = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)\n wcs_foot = wcs.calc_footprint()\n img_corners = SkyCoord(ra=wcs_foot[:, 0] * u.degree,\n dec=wcs_foot[:, 1] * u.degree)\n radius = img_center.separation(img_corners).max().value\n\n return radius"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'):\n serviceType = \"GSCConvert/GSCconvert.aspx\"\n spec_str = \"TRANSFORM={}-{}&IPPPSSOOT={}\"\n\n if 'rootname' in pf.getheader(image):\n ippssoot = pf.getval(image, 'rootname').upper()\n else:\n ippssoot = fu.buildNewRootname(image).upper()\n\n spec = spec_str.format(input_catalog, output_catalog, ippssoot)\n serviceUrl = \"{}/{}?{}\".format(SERVICELOCATION, serviceType, spec)\n rawcat = requests.get(serviceUrl)\n if not rawcat.ok:\n log.info(\"Problem accessing service with:\\n{{}\".format(serviceUrl))\n raise ValueError\n\n delta_ra = delta_dec = None\n tree = BytesIO(rawcat.content)\n for _, element in 
etree.iterparse(tree):\n if element.tag == 'deltaRA':\n delta_ra = float(element.text)\n elif element.tag == 'deltaDEC':\n delta_dec = float(element.text)\n\n return delta_ra, delta_dec", "response": "Find the GSC to GAIA offset based on guide star coordinates relative to GAIA."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nuses photutils to find sources in image based on segmentation.", "response": "def extract_sources(img, **pars):\n \"\"\"Use photutils to find sources in image based on segmentation.\n\n Parameters\n ----------\n dqmask : ndarray\n Bitmask which identifies whether a pixel should be used (1) in source\n identification or not(0). If provided, this mask will be applied to the\n input array prior to source identification.\n\n fwhm : float\n Full-width half-maximum (fwhm) of the PSF in pixels.\n\n threshold : float or None\n Value from the image which serves as the limit for determining sources.\n If None, compute a default value of (background+5*rms(background)).\n If threshold < 0.0, use absolute value as scaling factor for default value.\n\n source_box : int\n Size of box (in pixels) which defines the minimum size of a valid source.\n\n classify : bool\n Specify whether or not to apply classification based on invarient moments\n of each source to determine whether or not a source is likely to be a\n cosmic-ray, and not include those sources in the final catalog.\n\n centering_mode : str\n \"segmentaton\" or \"starfind\"\n Algorithm to use when computing the positions of the detected sources.\n Centering will only take place after `threshold` has been determined, and\n sources are identified using segmentation. Centering using `segmentation`\n will rely on `photutils.segmentation.source_properties` to generate the\n properties for the source catalog. 
Centering using `starfind` will use\n `photutils.IRAFStarFinder` to characterize each source in the catalog.\n\n nlargest : int, None\n Number of largest (brightest) sources in each chip/array to measure\n when using 'starfind' mode.\n\n output : str\n If specified, write out the catalog of sources to the file with this name.\n\n plot : bool\n Specify whether or not to create a plot of the sources on a view of the image.\n\n vmax : float\n If plotting the sources, scale the image to this maximum value.\n\n \"\"\"\n fwhm = pars.get('fwhm', 3.0)\n threshold = pars.get('threshold', None)\n source_box = pars.get('source_box', 7)\n classify = pars.get('classify', True)\n output = pars.get('output', None)\n plot = pars.get('plot', False)\n vmax = pars.get('vmax', None)\n centering_mode = pars.get('centering_mode', 'starfind')\n deblend = pars.get('deblend', False)\n dqmask = pars.get('dqmask', None)\n nlargest = pars.get('nlargest', None)\n # apply any provided dqmask for segmentation only\n if dqmask is not None:\n imgarr = img.copy()\n imgarr[dqmask] = 0\n else:\n imgarr = img\n\n bkg_estimator = MedianBackground()\n bkg = None\n\n exclude_percentiles = [10, 25, 50, 75]\n for percentile in exclude_percentiles:\n try:\n bkg = Background2D(imgarr, (50, 50), filter_size=(3, 3),\n bkg_estimator=bkg_estimator,\n exclude_percentile=percentile)\n # If it succeeds, stop and use that value\n bkg_rms = (5. * bkg.background_rms)\n bkg_rms_mean = bkg.background.mean() + 5. * bkg_rms.std()\n default_threshold = bkg.background + bkg_rms\n if threshold is None or threshold < 0.0:\n if threshold is not None and threshold < 0.0:\n threshold = -1 * threshold * default_threshold\n log.info(\"{} based on {}\".format(threshold.max(), default_threshold.max()))\n bkg_rms_mean = threshold.max()\n else:\n threshold = default_threshold\n else:\n bkg_rms_mean = 3. 
* threshold\n if bkg_rms_mean < 0:\n bkg_rms_mean = 0.\n break\n except Exception:\n bkg = None\n\n # If Background2D does not work at all, define default scalar values for\n # the background to be used in source identification\n if bkg is None:\n bkg_rms_mean = max(0.01, imgarr.min())\n bkg_rms = bkg_rms_mean * 5\n\n sigma = fwhm * gaussian_fwhm_to_sigma\n kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)\n kernel.normalize()\n segm = detect_sources(imgarr, threshold, npixels=source_box,\n filter_kernel=kernel)\n if deblend:\n segm = deblend_sources(imgarr, segm, npixels=5,\n filter_kernel=kernel, nlevels=16,\n contrast=0.01)\n # If classify is turned on, it should modify the segmentation map\n if classify:\n cat = source_properties(imgarr, segm)\n if len(cat) > 0:\n # Remove likely cosmic-rays based on central_moments classification\n bad_srcs = np.where(classify_sources(cat) == 0)[0] + 1\n segm.remove_labels(bad_srcs) # CAUTION: May be time-consuming!!!\n\n # convert segm to mask for daofind\n if centering_mode == 'starfind':\n src_table = None\n # daofind = IRAFStarFinder(fwhm=fwhm, threshold=5.*bkg.background_rms_median)\n log.info(\"Setting up DAOStarFinder with: \\n fwhm={} threshold={}\".format(fwhm, bkg_rms_mean))\n daofind = DAOStarFinder(fwhm=fwhm, threshold=bkg_rms_mean)\n # Identify nbrightest/largest sources\n if nlargest is not None:\n if nlargest > len(segm.labels):\n nlargest = len(segm.labels)\n large_labels = np.flip(np.argsort(segm.areas) + 1)[: nlargest]\n log.info(\"Looking for sources in {} segments\".format(len(segm.labels)))\n\n for label in segm.labels:\n if nlargest is not None and label not in large_labels:\n continue # Move on to the next segment\n # Get slice definition for the segment with this label\n seg_slice = segm.segments[label - 1].slices\n seg_yoffset = seg_slice[0].start\n seg_xoffset = seg_slice[1].start\n\n # Define raw data from this slice\n detection_img = img[seg_slice]\n # zero out any pixels which 
do not have this segments label\n detection_img[np.where(segm.data[seg_slice] == 0)] = 0\n\n # Detect sources in this specific segment\n seg_table = daofind(detection_img)\n # Pick out brightest source only\n if src_table is None and len(seg_table) > 0:\n # Initialize final master source list catalog\n src_table = Table(names=seg_table.colnames,\n dtype=[dt[1] for dt in seg_table.dtype.descr])\n if len(seg_table) > 0:\n max_row = np.where(seg_table['peak'] == seg_table['peak'].max())[0][0]\n # Add row for detected source to master catalog\n # apply offset to slice to convert positions into full-frame coordinates\n seg_table['xcentroid'] += seg_xoffset\n seg_table['ycentroid'] += seg_yoffset\n src_table.add_row(seg_table[max_row])\n\n else:\n cat = source_properties(img, segm)\n src_table = cat.to_table()\n # Make column names consistent with IRAFStarFinder column names\n src_table.rename_column('source_sum', 'flux')\n src_table.rename_column('source_sum_err', 'flux_err')\n\n if src_table is not None:\n log.info(\"Total Number of detected sources: {}\".format(len(src_table)))\n else:\n log.info(\"No detected sources!\")\n return None, None\n\n # Move 'id' column from first to last position\n # Makes it consistent for remainder of code\n cnames = src_table.colnames\n cnames.append(cnames[0])\n del cnames[0]\n tbl = src_table[cnames]\n\n if output:\n tbl['xcentroid'].info.format = '.10f' # optional format\n tbl['ycentroid'].info.format = '.10f'\n tbl['flux'].info.format = '.10f'\n if not output.endswith('.cat'):\n output += '.cat'\n tbl.write(output, format='ascii.commented_header')\n log.info(\"Wrote source catalog: {}\".format(output))\n\n if plot and plt is not None:\n norm = None\n if vmax is None:\n norm = ImageNormalize(stretch=SqrtStretch())\n fig, ax = plt.subplots(2, 2, figsize=(8, 8))\n ax[0][0].imshow(imgarr, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)\n ax[0][1].imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))\n 
ax[0][1].set_title('Segmentation Map')\n ax[1][0].imshow(bkg.background, origin='lower')\n if not isinstance(threshold, float):\n ax[1][1].imshow(threshold, origin='lower')\n return tbl, segm"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef classify_sources(catalog, sources=None):\n moments = catalog.moments_central\n if sources is None:\n sources = (0, len(moments))\n num_sources = sources[1] - sources[0]\n srctype = np.zeros((num_sources,), np.int32)\n for src in range(sources[0], sources[1]):\n # Protect against spurious detections\n src_x = catalog[src].xcentroid\n src_y = catalog[src].ycentroid\n if np.isnan(src_x) or np.isnan(src_y):\n continue\n x, y = np.where(moments[src] == moments[src].max())\n if (x[0] > 1) and (y[0] > 1):\n srctype[src] = 1\n\n return srctype", "response": "This function converts moments_central attribute for source catalog into star flag."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef generate_source_catalog(image, **kwargs):\n if not isinstance(image, pf.HDUList):\n raise ValueError(\"Input {} not fits.HDUList object\".format(image))\n dqname = kwargs.get('dqname', 'DQ')\n output = kwargs.get('output', None)\n # Build source catalog for entire image\n source_cats = {}\n numSci = countExtn(image, extname='SCI')\n\n for chip in range(numSci):\n chip += 1\n # find sources in image\n if output:\n rootname = image[0].header['rootname']\n outroot = '{}_sci{}_src'.format(rootname, chip)\n kwargs['output'] = outroot\n imgarr = image['sci', chip].data\n\n # apply any DQ array, if available\n dqmask = None\n if image.index_of(dqname):\n dqarr = image[dqname, chip].data\n\n # \"grow out\" regions in DQ mask flagged as saturated by several\n # pixels in every direction to prevent the\n # source match algorithm from trying to match multiple sources\n # from one image to a single source in the\n # other or 
vice-versa.\n # Create temp DQ mask containing all pixels flagged with any value EXCEPT 256\n non_sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=256)\n\n # Create temp DQ mask containing saturated pixels ONLY\n sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=~256)\n\n # Grow out saturated pixels by a few pixels in every direction\n grown_sat_mask = ndimage.binary_dilation(sat_mask, iterations=5)\n\n # combine the two temporary DQ masks into a single composite DQ mask.\n dqmask = np.bitwise_or(non_sat_mask, grown_sat_mask)\n\n # dqmask = bitfield_to_boolean_mask(dqarr, good_mask_value=False)\n # TODO: <---Remove this old no-sat bit grow line once this\n # thing works\n\n seg_tab, segmap = extract_sources(imgarr, dqmask=dqmask, **kwargs)\n seg_tab_phot = seg_tab\n\n source_cats[chip] = seg_tab_phot\n\n return source_cats", "response": "Generates source catalogs for each chip in the input image."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef generate_sky_catalog(image, refwcs, **kwargs):\n # Extract source catalogs for each chip\n source_cats = generate_source_catalog(image, **kwargs)\n\n # Build source catalog for entire image\n master_cat = None\n numSci = countExtn(image, extname='SCI')\n # if no refwcs specified, build one now...\n if refwcs is None:\n refwcs = build_reference_wcs([image])\n for chip in range(numSci):\n chip += 1\n # work with sources identified from this specific chip\n seg_tab_phot = source_cats[chip]\n if seg_tab_phot is None:\n continue\n # Convert pixel coordinates from this chip to sky coordinates\n chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip))\n seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1)\n # Convert sky positions to pixel positions in the reference WCS frame\n seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)\n seg_tab_phot['xcentroid'] = seg_xy_out[0]\n seg_tab_phot['ycentroid'] = 
seg_xy_out[1]\n if master_cat is None:\n master_cat = seg_tab_phot\n else:\n master_cat = vstack([master_cat, seg_tab_phot])\n\n return master_cat", "response": "Generates a source catalog from input image using photutils."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef compute_photometry(catalog, photmode):\n # Determine VEGAMAG zero-point using pysynphot for this photmode\n photmode = photmode.replace(' ', ', ')\n vega = S.FileSpectrum(VEGASPEC)\n bp = S.ObsBandpass(photmode)\n vegauvis = S.Observation(vega, bp)\n vegazpt = 2.5 * np.log10(vegauvis.countrate())\n\n # Use zero-point to convert flux values from catalog into magnitudes\n # source_phot = vegazpt - 2.5*np.log10(catalog['source_sum'])\n source_phot = vegazpt - 2.5 * np.log10(catalog['flux'])\n source_phot.name = 'vegamag'\n # Now add this new column to the catalog table\n catalog.add_column(source_phot)\n\n return catalog", "response": "Compute magnitudes for sources based on observations photmode."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nfilter the input catalog by photometry.", "response": "def filter_catalog(catalog, **kwargs):\n \"\"\" Create a new catalog selected from input based on photometry.\n\n Parameters\n ----------\n bright_limit : float\n Fraction of catalog based on brightness that should be retained.\n Value of 1.00 means full catalog.\n\n max_bright : int\n Maximum number of sources to keep regardless of `bright_limit`.\n\n min_bright : int\n Minimum number of sources to keep regardless of `bright_limit`.\n\n colname : str\n Name of column to use for selection/sorting.\n\n Returns\n -------\n new_catalog : `~astropy.table.Table`\n New table which only has the sources that meet the selection criteria.\n \"\"\"\n # interpret input pars\n bright_limit = kwargs.get('bright_limit', 1.00)\n max_bright = kwargs.get('max_bright', None)\n min_bright = kwargs.get('min_bright', 20)\n colname = 
kwargs.get('colname', 'vegamag')\n\n # sort by magnitude\n phot_column = catalog[colname]\n num_sources = len(phot_column)\n sort_indx = np.argsort(phot_column)\n if max_bright is None:\n max_bright = num_sources\n\n # apply limits, insuring no more than full catalog gets selected\n limit_num = max(int(num_sources * bright_limit), min_bright)\n limit_num = min(max_bright, limit_num, num_sources)\n\n # Extract sources identified by selection\n new_catalog = catalog[sort_indx[:limit_num]]\n\n return new_catalog"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef read_hlet_wcs(filename, ext):\n hstwcs = wcsutil.HSTWCS(filename, ext=ext)\n if hstwcs.naxis1 is None:\n hstwcs.naxis1 = int(hstwcs.wcs.crpix[0] * 2.) # Assume crpix is center of chip\n hstwcs.naxis2 = int(hstwcs.wcs.crpix[1] * 2.)\n\n return hstwcs", "response": "Read a HSTWCS file and return a WCS object."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef build_hstwcs(crval1, crval2, crpix1, crpix2, naxis1, naxis2, pscale, orientat):\n wcsout = wcsutil.HSTWCS()\n wcsout.wcs.crval = np.array([crval1, crval2])\n wcsout.wcs.crpix = np.array([crpix1, crpix2])\n wcsout.naxis1 = naxis1\n wcsout.naxis2 = naxis2\n wcsout.wcs.cd = buildRotMatrix(orientat) * [-1, 1] * pscale / 3600.0\n # Synchronize updates with astropy.wcs objects\n wcsout.wcs.set()\n wcsout.setPscale()\n wcsout.setOrient()\n wcsout.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n\n return wcsout", "response": "Build an astropy. wcsutil. 
HSTWCS object for a default instrument with the given crval pix1 and pix2."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef within_footprint(img, wcs, x, y):\n # start with limits of WCS shape\n if hasattr(wcs, 'naxis1'):\n naxis1 = wcs.naxis1\n naxis2 = wcs.naxis2\n elif hasattr(wcs, 'pixel_shape'):\n naxis1, naxis2 = wcs.pixel_shape\n else:\n naxis1 = wcs._naxis1\n naxis2 = wcs._naxis2\n maskx = np.bitwise_or(x < 0, x > naxis1)\n masky = np.bitwise_or(y < 0, y > naxis2)\n mask = ~np.bitwise_or(maskx, masky)\n x = x[mask]\n y = y[mask]\n\n # Now, confirm that these points fall within actual science area of WCS\n img_mask = create_image_footprint(img, wcs, border=1.0)\n inmask = np.where(img_mask[y.astype(np.int32), x.astype(np.int32)])[0]\n x = x[inmask]\n y = y[inmask]\n return x, y", "response": "Determine whether input x y fall within the science area of the image."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncreating the footprint of the image in the reference WCS frame.", "response": "def create_image_footprint(image, refwcs, border=0.):\n \"\"\" Create the footprint of the image in the reference WCS frame.\n\n Parameters\n ----------\n image : `astropy.io.fits.HDUList` or str\n Image to extract sources for matching to\n the external astrometric catalog.\n\n refwcs : `stwcs.wcsutil.HSTWCS`\n Reference WCS for coordinate frame of image.\n\n border : float\n Buffer (in arcseconds) around edge of image to exclude astrometric\n sources.\n\n \"\"\"\n # Interpret input image to generate initial source catalog and WCS\n if isinstance(image, str):\n image = pf.open(image)\n numSci = countExtn(image, extname='SCI')\n ref_x = refwcs._naxis1\n ref_y = refwcs._naxis2\n # convert border value into pixels\n border_pixels = int(border / refwcs.pscale)\n\n mask_arr = np.zeros((ref_y, ref_x), dtype=int)\n\n for chip in range(numSci):\n chip += 1\n # Build arrays of pixel positions 
for all edges of chip\n chip_y, chip_x = image['sci', chip].data.shape\n chipwcs = wcsutil.HSTWCS(image, ext=('sci', chip))\n xpix = np.arange(chip_x) + 1\n ypix = np.arange(chip_y) + 1\n edge_x = np.hstack([[1] * chip_y, xpix, [chip_x] * chip_y, xpix])\n edge_y = np.hstack([ypix, [1] * chip_x, ypix, [chip_y] * chip_x])\n edge_ra, edge_dec = chipwcs.all_pix2world(edge_x, edge_y, 1)\n edge_x_out, edge_y_out = refwcs.all_world2pix(edge_ra, edge_dec, 0)\n edge_x_out = np.clip(edge_x_out.astype(np.int32), 0, ref_x - 1)\n edge_y_out = np.clip(edge_y_out.astype(np.int32), 0, ref_y - 1)\n mask_arr[edge_y_out, edge_x_out] = 1\n\n # Fill in outline of each chip\n mask_arr = ndimage.binary_fill_holes(ndimage.binary_dilation(mask_arr, iterations=2))\n\n if border > 0.:\n mask_arr = ndimage.binary_erosion(mask_arr, iterations=border_pixels)\n\n return mask_arr"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nbuild a list of WCS objects for all chips in an image and a set of source catalogs.", "response": "def build_wcscat(image, group_id, source_catalog):\n \"\"\" Return a list of `~tweakwcs.tpwcs.FITSWCS` objects for all chips in an image.\n\n Parameters\n ----------\n image : str, ~astropy.io.fits.HDUList`\n Either filename or HDUList of a single HST observation.\n\n group_id : int\n Integer ID for group this image should be associated with; primarily\n used when separate chips are in separate files to treat them all as one\n exposure.\n\n source_catalog : dict\n If provided, these catalogs will be attached as `catalog`\n entries in each chip's ``FITSWCS`` object. 
It should be provided as a\n dict of astropy Tables identified by chip number with\n each table containing sources from image extension ``('sci', chip)`` as\n generated by `generate_source_catalog()`.\n\n Returns\n -------\n wcs_catalogs : list of `~tweakwcs.tpwcs.FITSWCS`\n List of `~tweakwcs.tpwcs.FITSWCS` objects defined for all chips in input image.\n\n \"\"\"\n open_file = False\n if isinstance(image, str):\n hdulist = pf.open(image)\n open_file = True\n elif isinstance(image, pf.HDUList):\n hdulist = image\n else:\n log.info(\"Wrong type of input, {}, for build_wcscat...\".format(type(image)))\n raise ValueError\n\n wcs_catalogs = []\n numsci = countExtn(hdulist)\n for chip in range(1, numsci + 1):\n w = wcsutil.HSTWCS(hdulist, ('SCI', chip))\n\n imcat = source_catalog[chip]\n # rename xcentroid/ycentroid columns, if necessary, to be consistent with tweakwcs\n if 'xcentroid' in imcat.colnames:\n imcat.rename_column('xcentroid', 'x')\n imcat.rename_column('ycentroid', 'y')\n\n wcscat = FITSWCS(\n w,\n meta={\n 'chip': chip,\n 'group_id': group_id,\n 'filename': image,\n 'catalog': imcat,\n 'name': image\n }\n )\n\n wcs_catalogs.append(wcscat)\n\n if open_file:\n hdulist.close()\n\n return wcs_catalogs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate headers of all images specified in shiftfile with shifts from shiftfile.", "response": "def update_from_shiftfile(shiftfile,wcsname=None,force=False):\n \"\"\"\n Update headers of all images specified in shiftfile with shifts\n from shiftfile.\n\n Parameters\n ----------\n shiftfile : str\n Filename of shiftfile.\n\n wcsname : str\n Label to give to new WCS solution being created by this fit. If\n a value of None is given, it will automatically use 'TWEAK' as the\n label. [Default =None]\n\n force : bool\n Update header even though WCS already exists with this solution or\n wcsname? 
[Default=False]\n\n \"\"\"\n f = open(fileutil.osfn(shiftfile))\n shift_lines = [x.strip() for x in f.readlines()]\n f.close()\n\n # interpret header of shift file\n for line in shift_lines:\n if 'refimage' in line or 'reference' in line:\n refimage = line.split(':')[-1]\n refimage = refimage[:refimage.find('[wcs]')].lstrip()\n break\n\n # Determine the max length in the first column (filenames)\n fnames = []\n for row in shift_lines:\n if row[0] == '#': continue\n fnames.append(len(row.split(' ')[0]))\n fname_fmt = 'S{0}'.format(max(fnames))\n\n # Now read in numerical values from shiftfile\n type_list = {'names':('fnames','xsh','ysh','rot','scale','xrms','yrms'),\n 'formats':(fname_fmt,'f4','f4','f4','f4','f4','f4')}\n try:\n sdict = np.loadtxt(shiftfile,dtype=type_list,unpack=False)\n except IndexError:\n tlist = {'names':('fnames','xsh','ysh','rot','scale'),\n 'formats':(fname_fmt,'f4','f4','f4','f4')}\n s = np.loadtxt(shiftfile,dtype=tlist,unpack=False)\n sdict = np.zeros([s['fnames'].shape[0],],dtype=type_list)\n for sname in s.dtype.names:\n sdict[sname] = s[sname]\n\n for img in sdict:\n updatewcs_with_shift(img['fnames'], refimage, wcsname=wcsname,\n rot=img['rot'], scale=img['scale'],\n xsh=img['xsh'], ysh=img['ysh'],\n xrms=img['xrms'], yrms=img['yrms'],\n force=force)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef updatewcs_with_shift(image,reference,wcsname=None, reusename=False,\n fitgeom='rscale',\n rot=0.0,scale=1.0,xsh=0.0,ysh=0.0,fit=None,\n xrms=None, yrms = None,\n verbose=False,force=False,sciext='SCI'):\n\n \"\"\"\n Update the SCI headers in 'image' based on the fit provided as determined\n in the WCS specified by 'reference'. 
The fit should be a 2-D matrix as\n generated for use with 'make_vector_plot()'.\n\n Notes\n -----\n The algorithm used to apply the provided fit solution to the image\n involves applying the following steps to the WCS of each of the\n input image's chips:\n\n 1. compute RA/Dec with full distortion correction for\n reference point as (Rc_i,Dc_i)\n\n 2. find the Xc,Yc for each Rc_i,Dc_i and get the difference from the\n CRPIX position for the reference WCS as (dXc_i,dYc_i)\n\n 3. apply fit (rot&scale) to (dXc_i,dYc_i) then apply shift, then add\n CRPIX back to get new (Xcs_i,Ycs_i) position\n\n 4. compute (Rcs_i,Dcs_i) as the sky coordinates for (Xcs_i,Ycs_i)\n\n 5. compute delta of (Rcs_i-Rc_i, Dcs_i-Dcs_i) as (dRcs_i,dDcs_i)\n\n 6. apply the fit to the chip's undistorted CD matrix, the apply linear\n distortion terms back in to create a new CD matrix\n\n 7. add (dRcs_i,dDcs_i) to CRVAL of the reference chip's WCS\n\n 8. update header with new WCS values\n\n Parameters\n ----------\n image : str or PyFITS.HDUList object\n Filename, or PyFITS object, of image with WCS to be updated.\n All extensions with EXTNAME matches the value of the 'sciext'\n parameter value (by default, all 'SCI' extensions) will be updated.\n\n reference : str\n Filename of image/headerlet (FITS file) which contains the WCS\n used to define the tangent plane in which all the fit parameters\n (shift, rot, scale) were measured.\n\n wcsname : str\n Label to give to new WCS solution being created by this fit. If\n a value of None is given, it will automatically use 'TWEAK' as the\n label. 
If a WCS has a name with this specific value, the code will\n automatically append a version ID using the format '_n', such as\n 'TWEAK_1', 'TWEAK_2',or 'TWEAK_update_1'.\n [Default =None]\n\n reusename : bool\n User can specify whether or not to over-write WCS with same name.\n [Default: False]\n\n rot : float\n Amount of rotation measured in fit to be applied.\n [Default=0.0]\n\n scale : float\n Amount of scale change measured in fit to be applied.\n [Default=1.0]\n\n xsh : float\n Offset in X pixels from defined tangent plane to be applied to image.\n [Default=0.0]\n\n ysh : float\n Offset in Y pixels from defined tangent plane to be applied to image.\n [Default=0.0]\n\n fit : arr\n Linear coefficients for fit\n [Default = None]\n\n xrms : float\n RMS of fit in RA (in decimal degrees) that will be recorded as\n CRDER1 in WCS and header\n [Default = None]\n\n yrms : float\n RMS of fit in Dec (in decimal degrees) that will be recorded as\n CRDER2 in WCS and header\n [Default = None]\n\n verbose : bool\n Print extra messages during processing? [Default=False]\n\n force : bool\n Update header even though WCS already exists with this solution or\n wcsname? [Default=False]\n\n sciext : string\n Value of FITS EXTNAME keyword for extensions with WCS headers to\n be updated with the fit values. 
[Default='SCI']\n\n \"\"\"\n # if input reference is a ref_wcs file from tweakshifts, use it\n if isinstance(reference, wcsutil.HSTWCS) or isinstance(reference, pywcs.WCS):\n wref = reference\n else:\n refimg = fits.open(reference, memmap=False)\n wref = None\n for extn in refimg:\n if 'extname' in extn.header and extn.header['extname'] == 'WCS':\n wref = pywcs.WCS(refimg['wcs'].header)\n break\n refimg.close()\n # else, we have presumably been provided a full undistorted image\n # as a reference, so use it with HSTWCS instead\n if wref is None:\n wref = wcsutil.HSTWCS(reference)\n\n if isinstance(image, fits.HDUList):\n open_image = False\n filename = image.filename()\n if image.fileinfo(0)['filemode'] is 'update':\n image_update = True\n else:\n image_update = False\n else:\n open_image = True\n filename = image\n image_update = None\n\n # Now that we are sure we have a good reference WCS to use,\n # continue with the update\n logstr = \"....Updating header for {:s}...\".format(filename)\n if verbose:\n print(\"\\n{:s}\\n\".format(logstr))\n else:\n log.info(logstr)\n\n # reset header WCS keywords to original (OPUS generated) values\n extlist = get_ext_list(image, extname='SCI')\n if extlist:\n if image_update:\n # Create initial WCSCORR extension\n wcscorr.init_wcscorr(image,force=force)\n else:\n extlist = [0]\n\n # insure that input PRIMARY WCS has been archived before overwriting\n # with new solution\n if open_image:\n fimg = fits.open(image, mode='update', memmap=False)\n image_update = True\n else:\n fimg = image\n\n if image_update:\n wcsutil.altwcs.archiveWCS(fimg,extlist,reusekey=True)\n\n # Process MEF images...\n for ext in extlist:\n logstr = \"Processing {:s}[{:s}]\".format(fimg.filename(),\n ext2str(ext))\n if verbose:\n print(\"\\n{:s}\\n\".format(logstr))\n else:\n log.info(logstr)\n chip_wcs = wcsutil.HSTWCS(fimg,ext=ext)\n\n update_refchip_with_shift(chip_wcs, wref, fitgeom=fitgeom,\n rot=rot, scale=scale, xsh=xsh, ysh=ysh,\n fit=fit, 
xrms=xrms, yrms=yrms)\n #if util.is_blank(wcsname):\n #wcsname = 'TWEAK'\n\n # Update FITS file with newly updated WCS for this chip\n extnum = fimg.index(fimg[ext])\n update_wcs(fimg, extnum, chip_wcs, wcsname=wcsname,\n reusename=reusename, verbose=verbose)\n\n if open_image:\n fimg.close()", "response": "Update the WCS headers in image with the values of the WCS specified by reference."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update_refchip_with_shift(chip_wcs, wcslin, fitgeom='rscale',\n rot=0.0, scale=1.0, xsh=0.0, ysh=0.0,\n fit=None, xrms=None, yrms=None):\n \"\"\" Compute the matrix for the scale and rotation correction\n\n Parameters\n ----------\n chip_wcs: wcs object\n HST of the input image\n wcslin: wcs object\n Reference WCS from which the offsets/rotations are determined\n fitgeom: str\n NOT USED\n rot : float\n Amount of rotation measured in fit to be applied.\n [Default=0.0]\n scale : float\n Amount of scale change measured in fit to be applied.\n [Default=1.0]\n xsh : float\n Offset in X pixels from defined tangent plane to be applied to image.\n [Default=0.0]\n ysh : float\n Offset in Y pixels from defined tangent plane to be applied to image.\n [Default=0.0]\n fit : arr\n Linear coefficients for fit\n [Default = None]\n xrms : float\n RMS of fit in RA (in decimal degrees) that will be recorded as\n CRDER1 in WCS and header\n [Default = None]\n yrms : float\n RMS of fit in Dec (in decimal degrees) that will be recorded as\n CRDER2 in WCS and header\n [Default = None]\n \"\"\"\n # compute the matrix for the scale and rotation correction\n if fit is None:\n fit = linearfit.buildFitMatrix(rot, scale)\n\n shift = np.asarray([xsh, ysh]) - np.dot(wcslin.wcs.crpix, fit) + wcslin.wcs.crpix\n\n fit = np.linalg.inv(fit).T\n\n cwcs = chip_wcs.deepcopy()\n cd_eye = np.eye(chip_wcs.wcs.cd.shape[0], dtype=ndfloat128)\n zero_shift = np.zeros(2, dtype=ndfloat128)\n\n naxis1, naxis2 = 
chip_wcs.pixel_shape\n\n # estimate precision necessary for iterative processes:\n maxiter = 100\n crpix2corners = np.dstack([i.flatten() for i in np.meshgrid(\n [1, naxis1], [1, naxis2])])[0] - chip_wcs.wcs.crpix\n maxUerr = 1.0e-5 / np.amax(np.linalg.norm(crpix2corners, axis=1))\n\n # estimate step for numerical differentiation. We need a step\n # large enough to avoid rounding errors and small enough to get a\n # better precision for numerical differentiation.\n # TODO: The logic below should be revised at a later time so that it\n # better takes into account the two competing requirements.\n hx = max(1.0, min(20.0, (chip_wcs.wcs.crpix[0] - 1.0) / 100.0,\n (naxis1 - chip_wcs.wcs.crpix[0]) / 100.0))\n hy = max(1.0, min(20.0, (chip_wcs.wcs.crpix[1] - 1.0) / 100.0,\n (naxis2 - chip_wcs.wcs.crpix[1]) / 100.0))\n\n # compute new CRVAL for the image WCS:\n crpixinref = wcslin.wcs_world2pix(\n chip_wcs.wcs_pix2world([chip_wcs.wcs.crpix],1),1)\n crpixinref = np.dot(fit, (crpixinref - shift).T).T\n chip_wcs.wcs.crval = wcslin.wcs_pix2world(crpixinref, 1)[0]\n chip_wcs.wcs.set()\n\n # initial approximation for CD matrix of the image WCS:\n (U, u) = linearize(cwcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,\n fit, shift, hx=hx, hy=hy)\n err0 = np.amax(np.abs(U-cd_eye)).astype(np.float64)\n chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd.astype(ndfloat128), U).astype(np.float64)\n chip_wcs.wcs.set()\n\n # NOTE: initial solution is the exact mathematical solution (modulo numeric\n # differentiation). However, e.g., due to rounding errors, approximate\n # numerical differentiation, the solution may be improved by performing\n # several iterations. 
The next step will try to perform\n # fixed-point iterations to \"improve\" the solution\n # but this is not really required.\n\n # Perform fixed-point iterations to improve the approximation\n # for CD matrix of the image WCS (actually for the U matrix).\n for i in range(maxiter):\n (U, u) = linearize(chip_wcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,\n cd_eye, zero_shift, hx=hx, hy=hy)\n err = np.amax(np.abs(U-cd_eye)).astype(np.float64)\n if err > err0:\n break\n chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd, U).astype(np.float64)\n chip_wcs.wcs.set()\n if err < maxUerr:\n break\n err0 = err\n\n if xrms is not None:\n chip_wcs.wcs.crder = np.array([xrms,yrms])", "response": "Update the refchip with the shift of the input image."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update_wcs(image,extnum,new_wcs,wcsname=\"\",reusename=False,verbose=False):\n # Start by insuring that the correct value of 'orientat' has been computed\n new_wcs.setOrient()\n\n fimg_open=False\n if not isinstance(image, fits.HDUList):\n fimg = fits.open(image, mode='update', memmap=False)\n fimg_open = True\n fimg_update = True\n else:\n fimg = image\n if fimg.fileinfo(0)['filemode'] is 'update':\n fimg_update = True\n else:\n fimg_update = False\n\n # Determine final (unique) WCSNAME value, either based on the default or\n # user-provided name\n if util.is_blank(wcsname):\n wcsname = 'TWEAK'\n if not reusename:\n wcsname = create_unique_wcsname(fimg, extnum, wcsname)\n\n idchdr = True\n if new_wcs.idcscale is None:\n idchdr = False\n # Open the file for updating the WCS\n try:\n logstr = 'Updating header for %s[%s]'%(fimg.filename(),str(extnum))\n if verbose:\n print(logstr)\n else:\n log.info(logstr)\n\n hdr = fimg[extnum].header\n\n if verbose:\n log.info(' with WCS of')\n new_wcs.printwcs()\n print(\"WCSNAME : \",wcsname)\n\n # Insure that if a copy of the WCS has not been created yet, it will be now\n wcs_hdr = 
new_wcs.wcs2header(idc2hdr=idchdr, relax=True)\n\n for key in wcs_hdr:\n hdr[key] = wcs_hdr[key]\n hdr['ORIENTAT'] = new_wcs.orientat\n hdr['WCSNAME'] = wcsname\n util.updateNEXTENDKw(fimg)\n\n # Only if this image was opened in update mode should this\n # newly updated WCS be archived, as it will never be written out\n # to a file otherwise.\n if fimg_update:\n if not reusename:\n # Save the newly updated WCS as an alternate WCS as well\n wkey = wcsutil.altwcs.next_wcskey(fimg,ext=extnum)\n else:\n wkey = wcsutil.altwcs.getKeyFromName(hdr,wcsname)\n\n # wcskey needs to be specified so that archiveWCS will create a\n # duplicate WCS with the same WCSNAME as the Primary WCS\n wcsutil.altwcs.archiveWCS(fimg,[extnum],wcsname=wcsname,\n wcskey=wkey, reusekey=reusename)\n finally:\n if fimg_open:\n # finish up by closing the file now\n fimg.close()", "response": "Updates the WCS of the specified extension number with the new WCS object."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ndetermining the size of the thread pool for the user - specified cfg value.", "response": "def get_pool_size(usr_config_value, num_tasks):\n \"\"\" Determine size of thread/process-pool for parallel processing.\n This examines the cpu_count to decide and return the right pool\n size to use. Also take into account the user's wishes via the config\n object value, if specified. On top of that, don't allow the pool size\n returned to be any higher than the number of parallel tasks, if specified.\n Only use what we need (mp.Pool starts pool_size processes, needed or not).\n If number of tasks is unknown, call this with \"num_tasks\" set to None.\n Returns 1 when indicating that parallel processing should not be used.\n Consolidate all such logic here, not in the caller. 
\"\"\"\n\n if not can_parallel:\n return 1\n # Give priority to their specified cfg value, over the actual cpu count\n if usr_config_value is not None:\n if num_tasks is None:\n return usr_config_value\n else:\n # usr_config_value may be needlessly high\n return min(usr_config_value, num_tasks)\n # they haven't specified a cfg value, so go with the cpu_count\n if num_tasks is None:\n return _cpu_count\n else:\n # run no more workers than tasks\n return min(_cpu_count, num_tasks)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef init_logging(logfile=DEFAULT_LOGNAME, default=None, level=logging.INFO):\n\n if logfile == \"INDEF\":\n if not is_blank(default):\n logname = fileutil.buildNewRootname(default, '.log')\n else:\n logname = DEFAULT_LOGNAME\n elif logfile not in [None, \"\" , \" \"]:\n if logfile.endswith('.log'):\n logname = logfile\n else:\n logname = logfile + '.log'\n else:\n logname = None\n\n if logname is not None:\n logutil.setup_global_logging()\n # Don't use logging.basicConfig since it can only be called once in a\n # session\n # TODO: Would be fine to use logging.config.dictConfig, but it's not\n # available in Python 2.5\n global _log_file_handler\n root_logger = logging.getLogger()\n if _log_file_handler:\n root_logger.removeHandler(_log_file_handler)\n # Default mode is 'a' which is fine\n _log_file_handler = logging.FileHandler(logname)\n # TODO: Make the default level configurable in the task parameters\n _log_file_handler.setLevel(level)\n _log_file_handler.setFormatter(\n logging.Formatter('[%(levelname)-8s] %(message)s'))\n root_logger.setLevel(level)\n root_logger.addHandler(_log_file_handler)\n\n print('Setting up logfile : ', logname)\n\n #stdout_logger = logging.getLogger('stsci.tools.logutil.stdout')\n # Disable display of prints to stdout from all packages except\n # drizzlepac\n #stdout_logger.addFilter(logutil.EchoFilter(include=['drizzlepac']))\n else:\n print('No 
trailer file created...')", "response": "Initialize logging for the current thread."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef end_logging(filename=None):\n\n if logutil.global_logging_started:\n if filename:\n print('Trailer file written to: ', filename)\n else:\n # This generally shouldn't happen if logging was started with\n # init_logging and a filename was given...\n print('No trailer file saved...')\n\n logutil.teardown_global_logging()\n else:\n print('No trailer file saved...')", "response": "Ends logging and restores system defaults."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nfind the rootname of the given file.", "response": "def findrootname(filename):\n \"\"\"\n Return the rootname of the given file.\n \"\"\"\n\n puncloc = [filename.find(char) for char in string.punctuation]\n if sys.version_info[0] >= 3:\n val = sys.maxsize\n else:\n val = sys.maxint\n for num in puncloc:\n if num !=-1 and num < val:\n val = num\n return filename[0:val]"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef removeFileSafely(filename,clobber=True):\n if filename is not None and filename.strip() != '':\n if os.path.exists(filename) and clobber: os.remove(filename)", "response": "Delete the file specified but only if it exists and clobber is True."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ndisplaying a warning box for the input parameter.", "response": "def displayEmptyInputWarningBox(display=True, parent=None):\n \"\"\" Displays a warning box for the 'input' parameter.\n \"\"\"\n if sys.version_info[0] >= 3:\n from tkinter.messagebox import showwarning\n else:\n from tkMessageBox import showwarning\n\n if display:\n msg = 'No valid input files found! 
'+\\\n 'Please check the value for the \"input\" parameter.'\n showwarning(parent=parent,message=msg, title=\"No valid inputs!\")\n return \"yes\""} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns the number of SCI extensions and the EXTNAME from a input MEF file.", "response": "def count_sci_extensions(filename):\n \"\"\" Return the number of SCI extensions and the EXTNAME from a input MEF file.\n \"\"\"\n num_sci = 0\n extname = 'SCI'\n\n hdu_list = fileutil.openImage(filename, memmap=False)\n\n for extn in hdu_list:\n if 'extname' in extn.header and extn.header['extname'] == extname:\n num_sci += 1\n\n if num_sci == 0:\n extname = 'PRIMARY'\n num_sci = 1\n\n hdu_list.close()\n\n return num_sci,extname"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncheck whether or not the specified WCSNAME already exists in the file fname.", "response": "def verifyUniqueWcsname(fname,wcsname):\n \"\"\"\n Report whether or not the specified WCSNAME already exists in the file\n \"\"\"\n uniq = True\n numsci,extname = count_sci_extensions(fname)\n wnames = altwcs.wcsnames(fname,ext=(extname,1))\n\n if wcsname in wnames.values():\n uniq = False\n\n return uniq"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nverify that WCSNAME is present in the file and if it is not present in all SCI extensions and return True if it is present.", "response": "def verifyUpdatewcs(fname):\n \"\"\"\n Verify the existence of WCSNAME in the file. If it is not present,\n report this to the user and raise an exception. 
Returns True if WCSNAME\n was found in all SCI extensions.\n \"\"\"\n updated = True\n numsci,extname = count_sci_extensions(fname)\n for n in range(1,numsci+1):\n hdr = fits.getheader(fname, extname=extname, extver=n, memmap=False)\n if 'wcsname' not in hdr:\n updated = False\n break\n return updated"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef verifyRefimage(refimage):\n valid = True\n\n # start by trying to see whether the code can even find the file\n if is_blank(refimage):\n valid=True\n return valid\n\n refroot,extroot = fileutil.parseFilename(refimage)\n if not os.path.exists(refroot):\n valid = False\n return valid\n\n # if a MEF has been specified, make sure extension contains a valid WCS\n if valid:\n if extroot is None:\n extn = findWCSExtn(refimage)\n if extn is None:\n valid = False\n else:\n valid = True\n else:\n # check for CD matrix in WCS object\n refwcs = wcsutil.HSTWCS(refimage)\n if not refwcs.wcs.has_cd():\n valid = False\n else:\n valid = True\n del refwcs\n\n return valid", "response": "Verify that the value of refimage is a valid WCS object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef findWCSExtn(filename):\n rootname,extroot = fileutil.parseFilename(filename)\n extnum = None\n if extroot is None:\n fimg = fits.open(rootname, memmap=False)\n for i,extn in enumerate(fimg):\n if 'crval1' in extn.header:\n refwcs = wcsutil.HSTWCS('{}[{}]'.format(rootname,i))\n if refwcs.wcs.has_cd():\n extnum = '{}'.format(i)\n break\n fimg.close()\n else:\n try:\n refwcs = wcsutil.HSTWCS(filename)\n if refwcs.wcs.has_cd():\n extnum = extroot\n except:\n extnum = None\n\n return extnum", "response": "Find the first valid WCS object in a file with the same name as the user - supplied filename."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nverify that images specified in filelist can 
be updated.", "response": "def verifyFilePermissions(filelist, chmod=True):\n \"\"\" Verify that images specified in 'filelist' can be updated.\n\n A message will be printed reporting the names of any images which\n do not have write-permission, then quit.\n \"\"\"\n badfiles = []\n archive_dir = False\n for img in filelist:\n fname = fileutil.osfn(img)\n if 'OrIg_files' in os.path.split(fname)[0]:\n archive_dir = True\n try:\n fp = open(fname,mode='a')\n fp.close()\n except IOError as e:\n if e.errno == errno.EACCES:\n badfiles.append(img)\n # Not a permission error.\n pass\n\n num_bad = len(badfiles)\n if num_bad > 0:\n if archive_dir:\n print('\\n')\n print('#'*40)\n print(' Working in \"OrIg_files\" (archive) directory. ')\n print(' This directory has been created to serve as an archive')\n print(' for the original input images. ')\n print('\\n These files should be copied into another directory')\n print(' for processing. ')\n print('#'*40)\n\n print('\\n')\n print('#'*40)\n print('Found %d files which can not be updated!'%(num_bad))\n for img in badfiles:\n print(' %s'%(img))\n print('\\nPlease reset permissions for these files and restart...')\n print('#'*40)\n print('\\n')\n filelist = None\n\n return filelist"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn a list of all parameter names included in the configObj regardless of which section the parameter was stored in the configObj.", "response": "def getFullParList(configObj):\n \"\"\"\n Return a single list of all parameter names included in the configObj\n regardless of which section the parameter was stored\n \"\"\"\n plist = []\n for par in configObj.keys():\n if isinstance(configObj[par],configobj.Section):\n plist.extend(getFullParList(configObj[par]))\n else:\n plist.append(par)\n return plist"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nvalidates the input parameters of the user.", "response": "def 
validateUserPars(configObj,input_dict):\n \"\"\" Compares input parameter names specified by user with those already\n recognized by the task.\n\n Any parameters provided by the user that does not match a known\n task parameter will be reported and a ValueError exception will be\n raised.\n \"\"\"\n # check to see whether any input parameters are unexpected.\n # Any unexpected parameters provided on input should be reported and\n # the code should stop\n plist = getFullParList(configObj)\n extra_pars = []\n for kw in input_dict:\n if kw not in plist:\n extra_pars.append(kw)\n if len(extra_pars) > 0:\n print ('='*40)\n print ('The following input parameters were not recognized as valid inputs:')\n for p in extra_pars:\n print(\" %s\"%(p))\n print('\\nPlease check the spelling of the parameter(s) and try again...')\n print('='*40)\n raise ValueError"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\napplies logic to turn on use of user - specified output WCS if user provides any parameter on command - line regardless of how final_wcs was set.", "response": "def applyUserPars_steps(configObj, input_dict, step='3a'):\n \"\"\" Apply logic to turn on use of user-specified output WCS if user provides\n any parameter on command-line regardless of how final_wcs was set.\n \"\"\"\n step_kws = {'7a': 'final_wcs', '3a': 'driz_sep_wcs'}\n stepname = getSectionName(configObj,step)\n finalParDict = configObj[stepname].copy()\n del finalParDict[step_kws[step]]\n\n # interpret input_dict to find any parameters for this step specified by the user\n user_pars = {}\n for kw in finalParDict:\n if kw in input_dict: user_pars[kw] = input_dict[kw]\n if len(user_pars) > 0:\n configObj[stepname][step_kws[step]] = True"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef getDefaultConfigObj(taskname,configObj,input_dict={},loadOnly=True):\n if configObj is None:\n # Start by 
grabbing the default values without using the GUI\n # This insures that all subsequent use of the configObj includes\n # all parameters and their last saved values\n configObj = teal.load(taskname)\n elif isinstance(configObj,str):\n if configObj.lower().strip() == 'defaults':\n # Load task default .cfg file with all default values\n configObj = teal.load(taskname,defaults=True)\n # define default filename for configObj\n configObj.filename = taskname.lower()+'.cfg'\n else:\n # Load user-specified .cfg file with its special default values\n # we need to call 'fileutil.osfn()' to insure all environment\n # variables specified by the user in the configObj filename are\n # expanded to the full path\n configObj = teal.load(fileutil.osfn(configObj))\n\n # merge in the user values for this run\n # this, though, does not save the results for use later\n if input_dict not in [None,{}]:# and configObj not in [None, {}]:\n # check to see whether any input parameters are unexpected.\n # Any unexpected parameters provided on input should be reported and\n # the code should stop\n validateUserPars(configObj,input_dict)\n\n # If everything looks good, merge user inputs with configObj and continue\n cfgpars.mergeConfigObj(configObj, input_dict)\n # Update the input .cfg file with the updated parameter values\n #configObj.filename = os.path.join(cfgpars.getAppDir(),os.path.basename(configObj.filename))\n #configObj.write()\n\n if not loadOnly:\n # We want to run the GUI AFTER merging in any parameters\n # specified by the user on the command-line and provided in\n # input_dict\n configObj = teal.teal(configObj,loadOnly=False)\n\n return configObj", "response": "Returns a configObj instance for the given task and the given configObj."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn section name based on step number.", "response": "def getSectionName(configObj,stepnum):\n \"\"\" Return section label based on step number.\n \"\"\"\n for key in 
configObj.keys():\n if key.find('STEP '+str(stepnum)+':') >= 0:\n return key"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef displayMakewcsWarningBox(display=True, parent=None):\n if sys.version_info[0] >= 3:\n from tkinter.messagebox import showwarning\n else:\n from tkMessageBox import showwarning\n\n ans = {'yes':True,'no':False}\n if ans[display]:\n msg = 'Setting \"updatewcs=yes\" will result '+ \\\n 'in all input WCS values to be recomputed '+ \\\n 'using the original distortion model and alignment.'\n showwarning(parent=parent,message=msg, title=\"WCS will be overwritten!\")\n return True", "response": "Displays a warning box for the makewcs parameter."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef printParams(paramDictionary, all=False, log=None):\n\n if log is not None:\n def output(msg):\n log.info(msg)\n else:\n def output(msg):\n print(msg)\n\n if not paramDictionary:\n output('No parameters were supplied')\n else:\n for key in sorted(paramDictionary):\n if all or (not isinstance(paramDictionary[key], dict)) \\\n and key[0] != '_':\n output('\\t' + '\\t'.join([str(key) + ' :',\n str(paramDictionary[key])]))\n if log is None:\n output('\\n')", "response": "Print nicely the parameters from the dictionary."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning True if the input is a comma separated list of names.", "response": "def isCommaList(inputFilelist):\n \"\"\"Return True if the input is a comma separated list of names.\"\"\"\n if isinstance(inputFilelist, int) or isinstance(inputFilelist, np.int32):\n ilist = str(inputFilelist)\n else:\n ilist = inputFilelist\n if \",\" in ilist:\n return True\n return False"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef loadFileList(inputFilelist):\n f = 
open(inputFilelist[1:])\n # check the first line in order to determine whether\n # IVM files have been specified in a second column...\n lines = f.readline()\n f.close()\n\n # If there is a second column...\n if len(line.split()) == 2:\n # ...parse out the names of the IVM files as well\n ivmlist = irafglob.irafglob(input, atfile=atfile_ivm)\n\n # Parse the @-file with irafglob to extract the input filename\n filelist = irafglob.irafglob(input, atfile=atfile_sci)\n return filelist", "response": "Load the IVM files from the first two columns and return the list of IVM files."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef readCommaList(fileList):\n names=fileList.split(',')\n fileList=[]\n for item in names:\n fileList.append(item)\n return fileList", "response": "Return a list of the files with the commas removed."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef update_input(filelist, ivmlist=None, removed_files=None):\n newfilelist = []\n\n if removed_files == []:\n return filelist, ivmlist\n else:\n sci_ivm = list(zip(filelist, ivmlist))\n for f in removed_files:\n result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ]\n ivmlist = [el[1] for el in sci_ivm]\n newfilelist = [el[0] for el in sci_ivm]\n return newfilelist, ivmlist", "response": "Updates the input list with the files removed from the input list."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ngetting the expstart and exppend from the header", "response": "def get_expstart(header,primary_hdr):\n \"\"\"shouldn't this just be defined in the instrument subclass of imageobject?\"\"\"\n\n if 'expstart' in primary_hdr:\n exphdr = primary_hdr\n else:\n exphdr = header\n\n if 'EXPSTART' in exphdr:\n expstart = float(exphdr['EXPSTART'])\n expend = float(exphdr['EXPEND'])\n else:\n expstart = 0.\n expend = 0.0\n\n return (expstart,expend)"} 
{"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef compute_texptime(imageObjectList):\n expnames = []\n exptimes = []\n start = []\n end = []\n for img in imageObjectList:\n expnames += img.getKeywordList('_expname')\n exptimes += img.getKeywordList('_exptime')\n start += img.getKeywordList('_expstart')\n end += img.getKeywordList('_expend')\n\n exptime = 0.\n expstart = min(start)\n expend = max(end)\n exposure = None\n for n in range(len(expnames)):\n if expnames[n] != exposure:\n exposure = expnames[n]\n exptime += exptimes[n]\n\n return (exptime,expstart,expend)", "response": "Compute the exposure time for all the members in the specified imageObjectList."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef computeRange(corners):\n x = corners[:, 0]\n y = corners[:, 1]\n _xrange = (np.minimum.reduce(x), np.maximum.reduce(x))\n _yrange = (np.minimum.reduce(y), np.maximum.reduce(y))\n return _xrange, _yrange", "response": "Compute the range spanned by an array of pixel positions."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getRotatedSize(corners, angle):\n if angle:\n _rotm = fileutil.buildRotMatrix(angle)\n # Rotate about the center\n _corners = np.dot(corners, _rotm)\n else:\n # If there is no rotation, simply return original values\n _corners = corners\n\n return computeRange(_corners)", "response": "Determine the size of a rotated meta image."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef readcols(infile, cols=[0, 1, 2, 3], hms=False):\n\n fin = open(infile,'r')\n outarr = []\n for l in fin.readlines():\n l = l.strip()\n if len(l) == 0 or len(l.split()) < len(cols) or (len(l) > 0 and l[0] == '#' or (l.find(\"INDEF\") > -1)): continue\n for i in range(10):\n lnew = l.replace(\" \",\" \")\n if lnew == l: break\n else: l = 
lnew\n lspl = lnew.split(\" \")\n\n if len(outarr) == 0:\n for c in range(len(cols)): outarr.append([])\n\n for c,n in zip(cols,list(range(len(cols)))):\n if not hms:\n val = float(lspl[c])\n else:\n val = lspl[c]\n outarr[n].append(val)\n fin.close()\n for n in range(len(cols)):\n outarr[n] = np.array(outarr[n])\n return outarr", "response": "Read the columns from an ASCII file as numpy arrays."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef parse_colnames(colnames,coords=None):\n cols = []\n if not isinstance(colnames,list):\n colnames = colnames.split(',')\n # parse column names from coords file and match to input values\n if coords is not None and fileutil.isFits(coords)[0]:\n # Open FITS file with table\n ftab = fits.open(coords, memmap=False)\n # determine which extension has the table\n for extn in ftab:\n if isinstance(extn, fits.BinTableHDU):\n # parse column names from table and match to inputs\n cnames = extn.columns.names\n if colnames is not None:\n for c in colnames:\n for name,i in zip(cnames,list(range(len(cnames)))):\n if c == name.lower(): cols.append(i)\n if len(cols) < len(colnames):\n errmsg = \"Not all input columns found in table...\"\n ftab.close()\n raise ValueError(errmsg)\n else:\n cols = cnames[:2]\n break\n ftab.close()\n else:\n for c in colnames:\n if isinstance(c, str):\n if c[0].lower() == 'c': cols.append(int(c[1:])-1)\n else:\n cols.append(int(c))\n else:\n if isinstance(c, int):\n cols.append(c)\n else:\n errmsg = \"Unsupported column names...\"\n raise ValueError(errmsg)\n return cols", "response": "Convert colnames input into list of column numbers."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef createFile(dataArray=None, outfile=None, header=None):\n # Insure that at least a data-array has been provided to create the file\n assert(dataArray is not None), \"Please supply a data array for 
createFiles\"\n\n try:\n # Create the output file\n fitsobj = fits.HDUList()\n if header is not None:\n try:\n del(header['NAXIS1'])\n del(header['NAXIS2'])\n if 'XTENSION' in header:\n del(header['XTENSION'])\n if 'EXTNAME' in header:\n del(header['EXTNAME'])\n if 'EXTVER' in header:\n del(header['EXTVER'])\n except KeyError:\n pass\n\n if 'NEXTEND' in header:\n header['NEXTEND'] = 0\n\n hdu = fits.PrimaryHDU(data=dataArray, header=header)\n try:\n del hdu.header['PCOUNT']\n del hdu.header['GCOUNT']\n except KeyError:\n pass\n\n else:\n hdu = fits.PrimaryHDU(data=dataArray)\n\n fitsobj.append(hdu)\n if outfile is not None:\n fitsobj.writeto(outfile)\n finally:\n # CLOSE THE IMAGE FILES\n fitsobj.close()\n\n if outfile is not None:\n del fitsobj\n fitsobj = None\n return fitsobj", "response": "Create a FITS file for the given data array and header."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef base_taskname(taskname, packagename=None):\n if not isinstance(taskname, str):\n return taskname\n\n indx = taskname.rfind('.')\n\n if indx >= 0:\n base_taskname = taskname[(indx+1):]\n pkg_name = taskname[:indx]\n else:\n base_taskname = taskname\n pkg_name = ''\n\n assert(True if packagename is None else (packagename == pkg_name))\n\n return base_taskname", "response": "Returns the base name of the task."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef addStep(self,key):\n ptime = _ptime()\n print('==== Processing Step ',key,' started at ',ptime[0])\n self.steps[key] = {'start':ptime}\n self.order.append(key)", "response": "Add information about a new step to the dict of steps that are available for this log entry"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nrecording the end time for the step. 
If key==None, simply record ptime as end time for class to represent the overall runtime since the initialization of the class.", "response": "def endStep(self,key):\n \"\"\"\n Record the end time for the step.\n\n If key==None, simply record ptime as end time for class to represent\n the overall runtime since the initialization of the class.\n \"\"\"\n ptime = _ptime()\n if key is not None:\n self.steps[key]['end'] = ptime\n self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]\n self.end = ptime\n\n print('==== Processing Step ',key,' finished at ',ptime[0])\n print('')"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef reportTimes(self):\n\n self.end = _ptime()\n total_time = 0\n print(ProcSteps.__report_header)\n for step in self.order:\n if 'elapsed' in self.steps[step]:\n _time = self.steps[step]['elapsed']\n else:\n _time = 0.0\n total_time += _time\n print(' %20s %0.4f sec.' % (step, _time))\n\n print(' %20s %s' % ('=' * 20, '=' * 20))\n print(' %20s %0.4f sec.' 
% ('Total', total_time))", "response": "Print out a formatted summary of the elapsed times for all the the\n performed steps."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef run(configObj=None, input_dict={}, loadOnly=False):\n # If called from interactive user-interface, configObj will not be\n # defined yet, so get defaults using EPAR/TEAL.\n #\n # Also insure that the input_dict (user-specified values) are folded in\n # with a fully populated configObj instance.\n configObj = util.getDefaultConfigObj(__taskname__,configObj,input_dict,loadOnly=loadOnly)\n if configObj is None:\n return\n # Define list of imageObject instances and output WCSObject instance\n # based on input paramters\n imgObjList,outwcs = processInput.setCommonInput(configObj)\n\n # Build DQ masks for all input images.\n buildMask(imgObjList,configObj)", "response": "Build DQ masks from all input images then apply static mask ( s."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef buildDQMasks(imageObjectList,configObj):\n # Insure that input imageObject is a list\n if not isinstance(imageObjectList, list):\n imageObjectList = [imageObjectList]\n\n for img in imageObjectList:\n img.buildMask(configObj['single'], configObj['bits'])", "response": "Builds DQ masks for all input images."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef buildMask(dqarr, bitvalue):\n return bitfield_to_boolean_mask(dqarr, bitvalue, good_mask_value=1,\n dtype=np.uint8)", "response": "Builds a bit - mask from an input DQ array and a bitvalue flag"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nbuilds a mask image from a rootname s DQ array and returns the name of the mask image.", "response": "def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):\n \"\"\" Builds mask image from rootname's 
DQ array\n If there is no valid 'DQ' array in image, then return\n an empty string.\n \"\"\"\n\n # If no bitvalue is set or rootname given, assume no mask is desired\n # However, this name would be useful as the output mask from\n # other processing, such as MultiDrizzle, so return it anyway.\n #if bitvalue == None or rootname == None:\n # return None\n\n # build output name\n maskname = output\n\n # If an old version of the maskfile was present, remove it and rebuild it.\n if fileutil.findFile(maskname):\n fileutil.removeFile(maskname)\n\n # Open input file with DQ array\n fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)\n try:\n _extn = fileutil.findExtname(fdq, extname, extver=extver)\n if _extn is not None:\n # Read in DQ array\n dqarr = fdq[_extn].data\n else:\n dqarr = None\n\n # For the case where there is no DQ array,\n # create a mask image of all ones.\n if dqarr is None:\n # We need to get the dimensions of the output DQ array\n # Since the DQ array is non-existent, look for the SCI extension\n _sci_extn = fileutil.findExtname(fdq,'SCI',extver=extver)\n if _sci_extn is not None:\n _shape = fdq[_sci_extn].data.shape\n dqarr = np.zeros(_shape,dtype=np.uint16)\n else:\n raise Exception\n # Build mask array from DQ array\n maskarr = buildMask(dqarr,bitvalue)\n #Write out the mask file as simple FITS file\n fmask = fits.open(maskname, mode='append', memmap=False)\n maskhdu = fits.PrimaryHDU(data = maskarr)\n fmask.append(maskhdu)\n\n #Close files\n fmask.close()\n del fmask\n fdq.close()\n del fdq\n\n except:\n fdq.close()\n del fdq\n # Safeguard against leaving behind an incomplete file\n if fileutil.findFile(maskname):\n os.remove(maskname)\n _errstr = \"\\nWarning: Problem creating MASK file for \"+rootname+\".\\n\"\n #raise IOError, _errstr\n print(_errstr)\n return None\n\n # Return the name of the mask image written out\n return maskname"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the 
documentation\ndef buildShadowMaskImage(dqfile,detnum,extnum,maskname,bitvalue=None,binned=1):\n # insure detnum is a string\n if type(detnum) != type(''):\n detnum = repr(detnum)\n\n _funcroot = '_func_Shadow_WF'\n\n # build template shadow mask's filename\n\n # If an old version of the maskfile was present, remove it and rebuild it.\n if fileutil.findFile(maskname):\n fileutil.removeFile(maskname)\n\n _use_inmask = not fileutil.findFile(dqfile) or bitvalue is None\n\n # Check for existance of input .c1h file for use in making inmask file\n if _use_inmask:\n #_mask = 'wfpc2_inmask'+detnum+'.fits'\n _mask = maskname\n # Check to see if file exists...\n if not fileutil.findFile(_mask):\n # If not, create the file.\n # This takes a long time to run, so it should be done\n # only when absolutely necessary...\n try:\n _funcx = _funcroot+detnum+'x'\n _funcy = _funcroot+detnum+'y'\n\n _xarr = np.clip(np.fromfunction(eval(_funcx),(800,800)),0.0,1.0).astype(np.uint8)\n _yarr = np.clip(np.fromfunction(eval(_funcy),(800,800)),0.0,1.0).astype(np.uint8)\n maskarr = _xarr * _yarr\n\n if binned !=1:\n bmaskarr = maskarr[::2,::2]\n bmaskarr *= maskarr[1::2,::2]\n bmaskarr *= maskarr[::2,1::2]\n bmaskarr *= maskarr[1::2,1::2]\n maskarr = bmaskarr.copy()\n del bmaskarr\n\n #Write out the mask file as simple FITS file\n fmask = fits.open(_mask, mode='append', memmap=False)\n maskhdu = fits.PrimaryHDU(data=maskarr)\n fmask.append(maskhdu)\n\n #Close files\n fmask.close()\n del fmask\n except:\n return None\n\n else:\n #\n # Build full mask based on .c1h and shadow mask\n #\n fdq = fileutil.openImage(dqfile, mode='readonly', memmap=False)\n try:\n # Read in DQ array from .c1h and from shadow mask files\n dqarr = fdq[int(extnum)].data\n #maskarr = fsmask[0].data\n\n # Build mask array from DQ array\n dqmaskarr = buildMask(dqarr,bitvalue)\n\n #Write out the mask file as simple FITS file\n fdqmask = fits.open(maskname, mode='append', memmap=False)\n maskhdu = 
fits.PrimaryHDU(data=dqmaskarr)\n fdqmask.append(maskhdu)\n\n #Close files\n fdqmask.close()\n del fdqmask\n fdq.close()\n del fdq\n\n except:\n fdq.close()\n del fdq\n # Safeguard against leaving behind an incomplete file\n if fileutil.findFile(maskname):\n os.remove(maskname)\n _errstr = \"\\nWarning: Problem creating DQMASK file for \"+rootname+\".\\n\"\n #raise IOError, _errstr\n print(_errstr)\n return None\n\n\n # Return the name of the mask image written out\n return maskname", "response": "Builds a mask image from a detector and a mask file."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,\n hms=True, precision=6,output=None,verbose=True):\n \"\"\" Primary interface to perform coordinate transformations from\n pixel to sky coordinates using STWCS and full distortion models\n read from the input image header.\n \"\"\"\n single_coord = False\n # Only use value provided in `coords` if nothing has been specified for coordfile\n if coords is not None and coordfile is None:\n coordfile = coords\n warnings.simplefilter('always',DeprecationWarning)\n warnings.warn(\"Please update calling code to pass in `coordfile` instead of `coords`.\",\n category=DeprecationWarning)\n warnings.simplefilter('default',DeprecationWarning)\n\n if coordfile is not None:\n if colnames in blank_list:\n colnames = ['c1','c2']\n # Determine columns which contain pixel positions\n cols = util.parse_colnames(colnames,coordfile)\n # read in columns from input coordinates file\n xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)\n if xyvals.ndim == 1: # only 1 entry in coordfile\n xlist = [xyvals[0].copy()]\n ylist = [xyvals[1].copy()]\n else:\n xlist = xyvals[:,0].copy()\n ylist = xyvals[:,1].copy()\n del xyvals\n else:\n if isinstance(x, np.ndarray):\n xlist = x.tolist()\n ylist = y.tolist()\n elif not 
isinstance(x,list):\n xlist = [x]\n ylist = [y]\n single_coord = True\n else:\n xlist = x\n ylist = y\n\n # start by reading in WCS+distortion info for input image\n inwcs = wcsutil.HSTWCS(input)\n if inwcs.wcs.is_unity():\n print(\"####\\nNo valid WCS found in {}.\\n Results may be invalid.\\n####\\n\".format(input))\n\n # Now, convert pixel coordinates into sky coordinates\n dra,ddec = inwcs.all_pix2world(xlist,ylist,1)\n\n # convert to HH:MM:SS.S format, if specified\n if hms:\n ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)\n rastr = ra\n decstr = dec\n else:\n # add formatting based on precision here...\n rastr = []\n decstr = []\n fmt = \"%.\"+repr(precision)+\"f\"\n for r,d in zip(dra,ddec):\n rastr.append(fmt%r)\n decstr.append(fmt%d)\n\n ra = dra\n dec = ddec\n\n if verbose or (not verbose and util.is_blank(output)):\n print('# Coordinate transformations for ',input)\n print('# X Y RA Dec\\n')\n for x,y,r,d in zip(xlist,ylist,rastr,decstr):\n print(\"%.4f %.4f %s %s\"%(x,y,r,d))\n\n # Create output file, if specified\n if output:\n f = open(output,mode='w')\n f.write(\"# Coordinates converted from %s\\n\"%input)\n for r,d in zip(rastr,decstr):\n f.write('%s %s\\n'%(r,d))\n f.close()\n print('Wrote out results to: ',output)\n\n if single_coord:\n ra = ra[0]\n dec = dec[0]\n\n return ra,dec", "response": "This function takes a input image file x y coordinates and returns a list of sky coordinates."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef run(configObj, wcsmap=None):\n\n # Insure all output filenames specified have .fits extensions\n if configObj['outdata'][-5:] != '.fits': configObj['outdata'] += '.fits'\n if not util.is_blank(configObj['outweight']) and configObj['outweight'][-5:] != '.fits': configObj['outweight'] += '.fits'\n if not util.is_blank(configObj['outcontext']) and configObj['outcontext'][-5:] != '.fits': configObj['outcontext'] += '.fits'\n\n # Keep 
track of any files we need to open\n in_sci_handle = None\n in_wht_handle = None\n out_sci_handle = None\n out_wht_handle = None\n out_con_handle = None\n\n _wcskey = configObj['wcskey']\n if util.is_blank(_wcskey):\n _wcskey = ' '\n\n scale_pars = configObj['Data Scaling Parameters']\n user_wcs_pars = configObj['User WCS Parameters']\n\n # Open the SCI (and WHT?) image\n # read file to get science array\n insci = get_data(configObj['input'])\n expin = fileutil.getKeyword(configObj['input'],scale_pars['expkey'])\n in_sci_phdr = fits.getheader(fileutil.parseFilename(configObj['input'])[0], memmap=False)\n\n # we need to read in the input WCS\n input_wcs = stwcs.wcsutil.HSTWCS(configObj['input'],wcskey=_wcskey)\n\n if not util.is_blank(configObj['inweight']):\n inwht = get_data(configObj['inweight']).astype(np.float32)\n else:\n # Generate a default weight map of all good pixels\n inwht = np.ones(insci.shape,dtype=insci.dtype)\n\n output_exists = False\n outname = fileutil.osfn(fileutil.parseFilename(configObj['outdata'])[0])\n if os.path.exists(outname):\n output_exists = True\n # Output was specified as a filename, so open it in 'update' mode\n outsci = get_data(configObj['outdata'])\n\n if output_exists:\n # we also need to read in the output WCS from pre-existing output\n output_wcs = stwcs.wcsutil.HSTWCS(configObj['outdata'])\n\n out_sci_hdr = fits.getheader(outname, memmap=False)\n outexptime = out_sci_hdr['DRIZEXPT']\n if 'ndrizim' in out_sci_hdr:\n uniqid = out_sci_hdr['ndrizim']+1\n else:\n uniqid = 1\n\n else: # otherwise, define the output WCS either from user pars or refimage\n if util.is_blank(configObj['User WCS Parameters']['refimage']):\n # Define a WCS based on user provided WCS values\n # NOTE:\n # All parameters must be specified, not just one or a few\n if not util.is_blank(user_wcs_pars['outscale']):\n output_wcs = wcs_functions.build_hstwcs(\n user_wcs_pars['raref'], user_wcs_pars['decref'],\n user_wcs_pars['xrefpix'], 
user_wcs_pars['yrefpix'],\n user_wcs_pars['outnx'], user_wcs_pars['outny'],\n user_wcs_pars['outscale'], user_wcs_pars['orient'] )\n else:\n # Define default WCS based on input image\n applydist = True\n if input_wcs.sip is None or input_wcs.instrument=='DEFAULT':\n applydist = False\n output_wcs = stwcs.distortion.utils.output_wcs([input_wcs],undistort=applydist)\n else:\n refimage = configObj['User WCS Parameters']['refimage']\n refroot,extroot = fileutil.parseFilename(refimage)\n if extroot is None:\n fimg = fits.open(refroot, memmap=False)\n for i,extn in enumerate(fimg):\n if 'CRVAL1' in extn.header: # Key on CRVAL1 for valid WCS\n refwcs = wcsutil.HSTWCS('{}[{}]'.format(refroot,i))\n if refwcs.wcs.has_cd():\n extroot = i\n break\n fimg.close()\n # try to find extension with valid WCS\n refimage = \"{}[{}]\".format(refroot,extroot)\n # Define the output WCS based on a user specified reference image WCS\n output_wcs = stwcs.wcsutil.HSTWCS(refimage)\n # Initialize values used for combining results\n outexptime = 0.0\n uniqid = 1\n\n # Set up the output data array and insure that the units for that array is 'cps'\n if outsci is None:\n # Define a default blank array based on definition of output_wcs\n outsci = np.empty(output_wcs.array_shape, dtype=np.float32)\n outsci.fill(np.nan)\n else:\n # Convert array to units of 'cps', if needed\n if outexptime != 0.0:\n np.divide(outsci, outexptime, outsci)\n outsci = outsci.astype(np.float32)\n\n # Now update output exposure time for additional input file\n outexptime += expin\n\n outwht = None\n if not util.is_blank(configObj['outweight']):\n outwht = get_data(configObj['outweight'])\n\n if outwht is None:\n outwht = np.zeros(output_wcs.array_shape, dtype=np.float32)\n else:\n outwht = outwht.astype(np.float32)\n\n outcon = None\n keep_con = False\n\n if not util.is_blank(configObj['outcontext']):\n outcon = get_data(configObj['outcontext'])\n keep_con = True\n if outcon is None:\n outcon = np.zeros((1,) + 
output_wcs.array_shape, dtype=np.int32)\n else:\n outcon = outcon.astype(np.int32)\n planeid = int((uniqid - 1)/ 32)\n\n # Add a new plane to the context image if planeid overflows\n while outcon.shape[0] <= planeid:\n plane = np.zeros_like(outcon[0])\n outcon = np.append(outcon, plane, axis=0)\n\n # Interpret wt_scl parameter\n if configObj['wt_scl'] == 'exptime':\n wt_scl = expin\n elif configObj['wt_scl'] == 'expsq':\n wt_scl = expin*expin\n else:\n wt_scl = float(configObj['wt_scl'])\n\n # Interpret coeffs parameter to determine whether to apply coeffs or not\n undistort = True\n if not configObj['coeffs'] or input_wcs.sip is None or input_wcs.instrument == 'DEFAULT':\n undistort = False\n # turn off use of coefficients if undistort is False (coeffs == False)\n if not undistort:\n input_wcs.sip = None\n input_wcs.cpdis1 = None\n input_wcs.cpdis2 = None\n input_wcs.det2im = None\n\n wcslin = distortion.utils.output_wcs([input_wcs],undistort=undistort)\n\n # Perform actual drizzling now...\n _vers = do_driz(insci, input_wcs, inwht,\n output_wcs, outsci, outwht, outcon,\n expin, scale_pars['in_units'],\n wt_scl, wcslin_pscale=wcslin.pscale ,uniqid=uniqid,\n pixfrac=configObj['pixfrac'], kernel=configObj['kernel'],\n fillval=scale_pars['fillval'], stepsize=configObj['stepsize'],\n wcsmap=None)\n\n out_sci_handle,outextn = create_output(configObj['outdata'])\n if not output_exists:\n # Also, define default header based on input image Primary header\n out_sci_handle[outextn].header = in_sci_phdr.copy()\n\n # Update header of output image with exptime used to scale the output data\n # if out_units is not counts, this will simply be a value of 1.0\n # the keyword 'exptime' will always contain the total exposure time\n # of all input image regardless of the output units\n out_sci_handle[outextn].header['EXPTIME'] = outexptime\n\n # create CTYPE strings\n ctype1 = input_wcs.wcs.ctype[0]\n ctype2 = input_wcs.wcs.ctype[1]\n if ctype1.find('-SIP'): ctype1 = 
ctype1.replace('-SIP','')\n if ctype2.find('-SIP'): ctype2 = ctype2.replace('-SIP','')\n\n # Update header with WCS keywords\n out_sci_handle[outextn].header['ORIENTAT'] = output_wcs.orientat\n out_sci_handle[outextn].header['CD1_1'] = output_wcs.wcs.cd[0][0]\n out_sci_handle[outextn].header['CD1_2'] = output_wcs.wcs.cd[0][1]\n out_sci_handle[outextn].header['CD2_1'] = output_wcs.wcs.cd[1][0]\n out_sci_handle[outextn].header['CD2_2'] = output_wcs.wcs.cd[1][1]\n out_sci_handle[outextn].header['CRVAL1'] = output_wcs.wcs.crval[0]\n out_sci_handle[outextn].header['CRVAL2'] = output_wcs.wcs.crval[1]\n out_sci_handle[outextn].header['CRPIX1'] = output_wcs.wcs.crpix[0]\n out_sci_handle[outextn].header['CRPIX2'] = output_wcs.wcs.crpix[1]\n out_sci_handle[outextn].header['CTYPE1'] = ctype1\n out_sci_handle[outextn].header['CTYPE2'] = ctype2\n out_sci_handle[outextn].header['VAFACTOR'] = 1.0\n\n\n if scale_pars['out_units'] == 'counts':\n np.multiply(outsci, outexptime, outsci)\n out_sci_handle[outextn].header['DRIZEXPT'] = outexptime\n\n else:\n out_sci_handle[outextn].header['DRIZEXPT'] = 1.0\n\n # Update header keyword NDRIZIM to keep track of how many images have\n # been combined in this product so far\n out_sci_handle[outextn].header['NDRIZIM'] = uniqid\n\n #define keywords to be written out to product header\n drizdict = outputimage.DRIZ_KEYWORDS.copy()\n\n # Update drizdict with current values\n drizdict['VER']['value'] = _vers[:44]\n drizdict['DATA']['value'] = configObj['input'][:64]\n drizdict['DEXP']['value'] = expin\n drizdict['OUDA']['value'] = configObj['outdata'][:64]\n drizdict['OUWE']['value'] = configObj['outweight'][:64]\n drizdict['OUCO']['value'] = configObj['outcontext'][:64]\n drizdict['MASK']['value'] = configObj['inweight'][:64]\n drizdict['WTSC']['value'] = wt_scl\n drizdict['KERN']['value'] = configObj['kernel']\n drizdict['PIXF']['value'] = configObj['pixfrac']\n drizdict['OUUN']['value'] = scale_pars['out_units']\n drizdict['FVAL']['value'] = 
scale_pars['fillval']\n drizdict['WKEY']['value'] = configObj['wcskey']\n outputimage.writeDrizKeywords(out_sci_handle[outextn].header,uniqid,drizdict)\n\n # add output array to output file\n out_sci_handle[outextn].data = outsci\n out_sci_handle.close()\n\n if not util.is_blank(configObj['outweight']):\n out_wht_handle,outwhtext = create_output(configObj['outweight'])\n out_wht_handle[outwhtext].header = out_sci_handle[outextn].header.copy()\n out_wht_handle[outwhtext].data = outwht\n out_wht_handle.close()\n\n if keep_con:\n out_con_handle,outconext = create_output(configObj['outcontext'])\n out_con_handle[outconext].data = outcon\n out_con_handle.close()", "response": "This function runs the wdrizzle from TEAL or Python command - line."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nmerge static or CR mask with DQ array on - the - fly here.", "response": "def mergeDQarray(maskname,dqarr):\n \"\"\" Merge static or CR mask with mask created from DQ array on-the-fly here.\n \"\"\"\n maskarr = None\n if maskname is not None:\n if isinstance(maskname, str):\n # working with file on disk (default case)\n if os.path.exists(maskname):\n mask = fileutil.openImage(maskname, memmap=False)\n maskarr = mask[0].data.astype(np.bool)\n mask.close()\n else:\n if isinstance(maskname, fits.HDUList):\n # working with a virtual input file\n maskarr = maskname[0].data.astype(np.bool)\n else:\n maskarr = maskname.data.astype(np.bool)\n\n if maskarr is not None:\n # merge array with dqarr now\n np.bitwise_and(dqarr,maskarr,dqarr)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _setDefaults(configObj={}):\n\n paramDict={\"build\":True,\n \"single\":True,\n \"stepsize\":10,\n \"in_units\":\"cps\",\n \"wt_scl\":1.,\n \"pixfrac\":1.,\n \"kernel\":\"square\",\n \"fillval\":999.,\n \"maskval\": None,\n \"rot\":0.,\n \"scale\":1.,\n \"xsh\":0.,\n \"ysh\":0.,\n \"blotnx\":2048,\n \"blotny\":2048,\n \"outnx\":4096,\n 
\"outny\":4096,\n \"data\":None,\n \"driz_separate\":True,\n \"driz_combine\":False}\n\n if(len(configObj) !=0):\n for key in configObj.keys():\n paramDict[key]=configObj[key]\n\n return paramDict", "response": "set up the default parameters to run drizzle on the next iteration"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef interpret_maskval(paramDict):\n # interpret user specified final_maskval value to use for initializing\n # output SCI array...\n if 'maskval' not in paramDict:\n return 0\n maskval = paramDict['maskval']\n if maskval is None:\n maskval = np.nan\n else:\n maskval = float(maskval) # just to be clear and absolutely sure...\n return maskval", "response": "Interpret the final_maskval value in the paramDict."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nperforms drizzle operation on input to create output. The input parameters originally was a list of dictionaries, one for each input, that matches the primary parameters for an ``IRAF`` `drizzle` task. This method would then loop over all the entries in the list and run `drizzle` for each entry. 
Parameters required for input in paramDict: build,single,units,wt_scl,pixfrac,kernel,fillval, rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data", "response": "def run_driz(imageObjectList,output_wcs,paramDict,single,build,wcsmap=None):\n \"\"\" Perform drizzle operation on input to create output.\n The input parameters originally was a list\n of dictionaries, one for each input, that matches the\n primary parameters for an ``IRAF`` `drizzle` task.\n\n This method would then loop over all the entries in the\n list and run `drizzle` for each entry.\n\n Parameters required for input in paramDict:\n build,single,units,wt_scl,pixfrac,kernel,fillval,\n rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data\n \"\"\"\n # Insure that input imageObject is a list\n if not isinstance(imageObjectList, list):\n imageObjectList = [imageObjectList]\n\n #\n # Setup the versions info dictionary for output to PRIMARY header\n # The keys will be used as the name reported in the header, as-is\n #\n _versions = {'AstroDrizzle':__version__,\n 'PyFITS':util.__fits_version__,\n 'Numpy':util.__numpy_version__}\n\n # Set sub-sampling rate for drizzling\n #stepsize = 2.0\n log.info(' **Using sub-sampling value of %s for kernel %s' %\n (paramDict['stepsize'], paramDict['kernel']))\n\n maskval = interpret_maskval(paramDict)\n\n outwcs = copy.deepcopy(output_wcs)\n\n # Check for existance of output file.\n if (not single and build and\n fileutil.findFile(imageObjectList[0].outputNames['outFinal'])):\n log.info('Removing previous output product...')\n os.remove(imageObjectList[0].outputNames['outFinal'])\n\n # print out parameters being used for drizzling\n log.info(\"Running Drizzle to create output frame with WCS of: \")\n output_wcs.printwcs()\n\n # Will we be running in parallel?\n pool_size = util.get_pool_size(paramDict.get('num_cores'), len(imageObjectList))\n will_parallel = single and pool_size > 1\n if will_parallel:\n log.info('Executing %d parallel workers' % pool_size)\n else:\n if single: 
# not yet an option for final drizzle, msg would confuse\n log.info('Executing serially')\n\n # Set parameters for each input and run drizzle on it here.\n #\n # Perform drizzling...\n\n numctx = 0\n for img in imageObjectList:\n numctx += img._nmembers\n _numctx = {'all':numctx}\n\n # if single:\n # Determine how many chips make up each single image\n for img in imageObjectList:\n for chip in img.returnAllChips(extname=img.scienceExt):\n plsingle = chip.outputNames['outSingle']\n if plsingle in _numctx: _numctx[plsingle] += 1\n else: _numctx[plsingle] = 1\n\n # Compute how many planes will be needed for the context image.\n _nplanes = int((_numctx['all']-1) / 32) + 1\n # For single drizzling or when context is turned off,\n # minimize to 1 plane only...\n if single or imageObjectList[0][1].outputNames['outContext'] in [None,'',' ']:\n _nplanes = 1\n\n #\n # An image buffer needs to be setup for converting the input\n # arrays (sci and wht) from FITS format to native format\n # with respect to byteorder and byteswapping.\n # This buffer should be reused for each input if possible.\n #\n _outsci = _outwht = _outctx = _hdrlist = None\n if (not single) or \\\n (single and (not will_parallel) and (not imageObjectList[0].inmemory)):\n # Note there are four cases/combinations for single drizzle alone here:\n # (not-inmem, serial), (not-inmem, parallel), (inmem, serial), (inmem, parallel)\n _outsci=np.empty(output_wcs.array_shape, dtype=np.float32)\n _outsci.fill(maskval)\n _outwht=np.zeros(output_wcs.array_shape, dtype=np.float32)\n # initialize context to 3-D array but only pass appropriate plane to drizzle as needed\n _outctx=np.zeros((_nplanes,) + output_wcs.array_shape, dtype=np.int32)\n _hdrlist = []\n\n # Keep track of how many chips have been processed\n # For single case, this will determine when to close\n # one product and open the next.\n _chipIdx = 0\n\n # Remember the name of the 1st image that goes into this particular product\n # Insure that the header 
reports the proper values for the start of the\n # exposure time used to make this; in particular, TIME-OBS and DATE-OBS.\n template = None\n\n #\n # Work on each image\n #\n subprocs = []\n for img in imageObjectList:\n\n chiplist = img.returnAllChips(extname=img.scienceExt)\n\n # How many inputs should go into this product?\n num_in_prod = _numctx['all']\n if single:\n num_in_prod = _numctx[chiplist[0].outputNames['outSingle']]\n\n # The name of the 1st image\n fnames = []\n for chip in chiplist:\n fnames.append(chip.outputNames['data'])\n\n if _chipIdx == 0:\n template = fnames\n else:\n template.extend(fnames)\n\n # Work each image, possibly in parallel\n if will_parallel:\n # use multiprocessing.Manager only if in parallel and in memory\n if img.inmemory:\n manager = multiprocessing.Manager()\n dproxy = manager.dict(img.virtualOutputs) # copy & wrap it in proxy\n img.virtualOutputs = dproxy\n\n # parallelize run_driz_img (currently for separate drizzle only)\n p = multiprocessing.Process(target=run_driz_img,\n name='adrizzle.run_driz_img()', # for err msgs\n args=(img,chiplist,output_wcs,outwcs,template,paramDict,\n single,num_in_prod,build,_versions,_numctx,_nplanes,\n _chipIdx,None,None,None,None,wcsmap))\n subprocs.append(p)\n else:\n # serial run_driz_img run (either separate drizzle or final drizzle)\n run_driz_img(img,chiplist,output_wcs,outwcs,template,paramDict,\n single,num_in_prod,build,_versions,_numctx,_nplanes,\n _chipIdx,_outsci,_outwht,_outctx,_hdrlist,wcsmap)\n\n # Increment/reset master chip counter\n _chipIdx += len(chiplist)\n if _chipIdx == num_in_prod:\n _chipIdx = 0\n\n # do the join if we spawned tasks\n if will_parallel:\n mputil.launch_and_wait(subprocs, pool_size) # blocks till all done\n\n del _outsci,_outwht,_outctx,_hdrlist"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef run_driz_img(img,chiplist,output_wcs,outwcs,template,paramDict,single,\n 
num_in_prod,build,_versions,_numctx,_nplanes,chipIdxCopy,\n _outsci,_outwht,_outctx,_hdrlist,wcsmap):\n \"\"\" Perform the drizzle operation on a single image.\n This is separated out from :py:func:`run_driz` so as to keep together\n the entirety of the code which is inside the loop over\n images. See the :py:func:`run_driz` code for more documentation.\n \"\"\"\n maskval = interpret_maskval(paramDict)\n\n\n # Check for unintialized inputs\n here = _outsci is None and _outwht is None and _outctx is None\n if _outsci is None:\n _outsci=np.empty(output_wcs.array_shape, dtype=np.float32)\n if single:\n _outsci.fill(0)\n else:\n _outsci.fill(maskval)\n if _outwht is None:\n _outwht=np.zeros(output_wcs.array_shape, dtype=np.float32)\n if _outctx is None:\n _outctx = np.zeros((_nplanes,) + output_wcs.array_shape, dtype=np.int32)\n if _hdrlist is None:\n _hdrlist = []\n\n # Work on each chip - note that they share access to the arrays above\n for chip in chiplist:\n # See if we will be writing out data\n doWrite = chipIdxCopy == num_in_prod-1\n\n# debuglog('#chips='+str(chipIdxCopy)+', num_in_prod='+\\\n# str(num_in_prod)+', single='+str(single)+', write='+\\\n# str(doWrite)+', here='+str(here))\n\n # run_driz_chip\n run_driz_chip(img,chip,output_wcs,outwcs,template,paramDict,\n single,doWrite,build,_versions,_numctx,_nplanes,\n chipIdxCopy,_outsci,_outwht,_outctx,_hdrlist,wcsmap)\n\n # Increment chip counter (also done outside of this function)\n chipIdxCopy += 1\n\n #\n # Reset for next output image...\n #\n if here:\n del _outsci,_outwht,_outctx,_hdrlist\n elif single:\n np.multiply(_outsci,0.,_outsci)\n np.multiply(_outwht,0.,_outwht)\n np.multiply(_outctx,0,_outctx)\n # this was \"_hdrlist=[]\", but we need to preserve the var ptr itself\n while len(_hdrlist)>0: _hdrlist.pop()", "response": "This function is used to run drizzle on a single image."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef 
run_driz_chip(img,chip,output_wcs,outwcs,template,paramDict,single,\n doWrite,build,_versions,_numctx,_nplanes,_numchips,\n _outsci,_outwht,_outctx,_hdrlist,wcsmap):\n \"\"\" Perform the drizzle operation on a single chip.\n This is separated out from `run_driz_img` so as to keep together\n the entirety of the code which is inside the loop over\n chips. See the `run_driz` code for more documentation.\n \"\"\"\n global time_pre_all, time_driz_all, time_post_all, time_write_all\n\n epoch = time.time()\n\n # Look for sky-subtracted product\n if os.path.exists(chip.outputNames['outSky']):\n chipextn = '['+chip.header['extname']+','+str(chip.header['extver'])+']'\n _expname = chip.outputNames['outSky']+chipextn\n else:\n # If sky-subtracted product does not exist, use regular input\n _expname = chip.outputNames['data']\n log.info('-Drizzle input: %s' % _expname)\n\n # Open the SCI image\n _handle = fileutil.openImage(_expname, mode='readonly', memmap=False)\n _sciext = _handle[chip.header['extname'],chip.header['extver']]\n\n # Apply sky subtraction and unit conversion to input array\n if chip.computedSky is None:\n _insci = _sciext.data\n else:\n log.info(\"Applying sky value of %0.6f to %s\"%(chip.computedSky,_expname))\n _insci = _sciext.data - chip.computedSky\n # If input SCI image is still integer format (RAW files)\n # transform it to float32 for all subsequent operations\n # needed for numpy >=1.12.x\n if np.issubdtype(_insci[0,0],np.int16):\n _insci = _insci.astype(np.float32)\n\n _insci *= chip._effGain\n\n # Set additional parameters needed by 'drizzle'\n _in_units = chip.in_units.lower()\n if _in_units == 'cps':\n _expin = 1.0\n else:\n _expin = chip._exptime\n\n ####\n #\n # Put the units keyword handling in the imageObject class\n #\n ####\n # Determine output value of BUNITS\n # and make sure it is not specified as 'ergs/cm...'\n _bunit = chip._bunit\n\n _bindx = _bunit.find('/')\n\n if paramDict['units'] == 'cps':\n # If BUNIT value does not specify 
count rate already...\n if _bindx < 1:\n # ... append '/SEC' to value\n _bunit += '/S'\n else:\n # reset _bunit here to None so it does not\n # overwrite what is already in header\n _bunit = None\n else:\n if _bindx > 0:\n # remove '/S'\n _bunit = _bunit[:_bindx]\n else:\n # reset _bunit here to None so it does not\n # overwrite what is already in header\n _bunit = None\n\n _uniqid = _numchips + 1\n if _nplanes == 1:\n # We need to reset what gets passed to TDRIZ\n # when only 1 context image plane gets generated\n # to prevent overflow problems with trying to access\n # planes that weren't created for large numbers of inputs.\n _uniqid = ((_uniqid-1) % 32) + 1\n\n # Select which mask needs to be read in for drizzling\n ####\n #\n # Actually need to generate mask file here 'on-demand'\n # and combine it with the static_mask for single_drizzle case...\n #\n ####\n # Build basic DQMask from DQ array and bits value\n dqarr = img.buildMask(chip._chip,bits=paramDict['bits'])\n\n # get correct mask filenames/objects\n staticMaskName = chip.outputNames['staticMask']\n crMaskName = chip.outputNames['crmaskImage']\n\n if img.inmemory:\n if staticMaskName in img.virtualOutputs:\n staticMaskName = img.virtualOutputs[staticMaskName]\n if crMaskName in img.virtualOutputs:\n crMaskName = img.virtualOutputs[crMaskName]\n\n # Merge appropriate additional mask(s) with DQ mask\n if single:\n mergeDQarray(staticMaskName,dqarr)\n if dqarr.sum() == 0:\n log.warning('All pixels masked out when applying static mask!')\n else:\n mergeDQarray(staticMaskName,dqarr)\n\n if dqarr.sum() == 0:\n log.warning('All pixels masked out when applying static mask!')\n else:\n # Only apply cosmic-ray mask when some good pixels remain after\n # applying the static mask\n mergeDQarray(crMaskName,dqarr)\n\n if dqarr.sum() == 0:\n log.warning('WARNING: All pixels masked out when applying '\n 'cosmic ray mask to %s' % _expname)\n updateInputDQArray(chip.dqfile,chip.dq_extn,chip._chip,\n crMaskName, 
paramDict['crbit'])\n\n img.set_wtscl(chip._chip,paramDict['wt_scl'])\n\n pix_ratio = outwcs.pscale / chip.wcslin_pscale\n\n # Convert mask to a datatype expected by 'tdriz'\n # Also, base weight mask on ERR or IVM file as requested by user\n wht_type = paramDict['wht_type']\n\n if wht_type == 'ERR':\n _inwht = img.buildERRmask(chip._chip,dqarr,pix_ratio)\n elif wht_type == 'IVM':\n _inwht = img.buildIVMmask(chip._chip,dqarr,pix_ratio)\n elif wht_type == 'EXP':\n _inwht = img.buildEXPmask(chip._chip,dqarr)\n else: # wht_type == None, used for single drizzle images\n _inwht = chip._exptime * dqarr.astype(np.float32)\n\n if not(paramDict['clean']):\n # Write out mask file if 'clean' has been turned off\n if single:\n step_mask = 'singleDrizMask'\n else:\n step_mask = 'finalMask'\n\n _outmaskname = chip.outputNames[step_mask]\n if os.path.exists(_outmaskname): os.remove(_outmaskname)\n pimg = fits.PrimaryHDU(data=_inwht)\n img.saveVirtualOutputs({step_mask:pimg})\n # Only write out mask files if in_memory=False\n if not img.inmemory:\n pimg.writeto(_outmaskname)\n del pimg\n log.info('Writing out mask file: %s' % _outmaskname)\n\n time_pre = time.time() - epoch; epoch = time.time()\n # New interface to performing the drizzle operation on a single chip/image\n _vers = do_driz(_insci, chip.wcs, _inwht, outwcs, _outsci, _outwht, _outctx,\n _expin, _in_units, chip._wtscl,\n wcslin_pscale=chip.wcslin_pscale, uniqid=_uniqid,\n pixfrac=paramDict['pixfrac'], kernel=paramDict['kernel'],\n fillval=paramDict['fillval'], stepsize=paramDict['stepsize'],\n wcsmap=wcsmap)\n time_driz = time.time() - epoch; epoch = time.time()\n\n # Set up information for generating output FITS image\n #### Check to see what names need to be included here for use in _hdrlist\n chip.outputNames['driz_version'] = _vers\n chip.outputNames['driz_wcskey'] = paramDict['wcskey']\n outputvals = chip.outputNames.copy()\n\n # Update entries for names/values based on final output\n 
outputvals.update(img.outputValues)\n for kw in img.outputNames:\n if kw[:3] == 'out':\n outputvals[kw] = img.outputNames[kw]\n outputvals['exptime'] = chip._exptime\n outputvals['expstart'] = chip._expstart\n outputvals['expend'] = chip._expend\n\n outputvals['wt_scl_val'] = chip._wtscl\n\n _hdrlist.append(outputvals)\n time_post = time.time() - epoch; epoch = time.time()\n\n if doWrite:\n ###########################\n #\n # IMPLEMENTATION REQUIREMENT:\n #\n # Need to implement scaling of the output image\n # from 'cps' to 'counts' in the case where 'units'\n # was set to 'counts'... 21-Mar-2005\n #\n ###########################\n\n # Convert output data from electrons/sec to counts/sec as specified\n native_units = img.native_units\n if paramDict['proc_unit'].lower() == 'native' and native_units.lower()[:6] == 'counts':\n np.divide(_outsci, chip._gain, _outsci)\n _bunit = native_units.lower()\n if paramDict['units'] == 'counts':\n indx = _bunit.find('/')\n if indx > 0: _bunit = _bunit[:indx]\n\n # record IDCSCALE for output to product header\n paramDict['idcscale'] = chip.wcs.idcscale\n #If output units were set to 'counts', rescale the array in-place\n if paramDict['units'] == 'counts':\n #determine what exposure time needs to be used\n # to rescale the product.\n if single:\n _expscale = chip._exptime\n else:\n _expscale = img.outputValues['texptime']\n np.multiply(_outsci, _expscale, _outsci)\n #\n # Write output arrays to FITS file(s)\n #\n if not single:\n img.inmemory = False\n\n _outimg = outputimage.OutputImage(_hdrlist, paramDict, build=build,\n wcs=output_wcs, single=single)\n _outimg.set_bunit(_bunit)\n _outimg.set_units(paramDict['units'])\n outimgs = _outimg.writeFITS(template,_outsci,_outwht,ctxarr=_outctx,\n versions=_versions,virtual=img.inmemory)\n del _outimg\n\n # update imageObject with product in memory\n if single:\n img.saveVirtualOutputs(outimgs)\n\n # this is after the doWrite\n time_write = time.time() - epoch; epoch = time.time()\n if 
False and not single: # turn off all this perf reporting for now\n time_pre_all.append(time_pre)\n time_driz_all.append(time_driz)\n time_post_all.append(time_post)\n time_write_all.append(time_write)\n\n log.info('chip time pre-drizzling: %6.3f' % time_pre)\n log.info('chip time drizzling: %6.3f' % time_driz)\n log.info('chip time post-drizzling: %6.3f' % time_post)\n log.info('chip time writing output: %6.3f' % time_write)\n\n if doWrite:\n tot_pre = sum(time_pre_all)\n tot_driz = sum(time_driz_all)\n tot_post = sum(time_post_all)\n tot_write = sum(time_write_all)\n tot = tot_pre+tot_driz+tot_post+tot_write\n log.info('chip total pre-drizzling: %6.3f (%4.1f%%)' % (tot_pre, (100.*tot_pre/tot)))\n log.info('chip total drizzling: %6.3f (%4.1f%%)' % (tot_driz, (100.*tot_driz/tot)))\n log.info('chip total post-drizzling: %6.3f (%4.1f%%)' % (tot_post, (100.*tot_post/tot)))\n log.info('chip total writing output: %6.3f (%4.1f%%)' % (tot_write, (100.*tot_write/tot)))", "response": "Perform the drizzle operation on a single chip."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef do_driz(insci, input_wcs, inwht,\n output_wcs, outsci, outwht, outcon,\n expin, in_units, wt_scl,\n wcslin_pscale=1.0,uniqid=1, pixfrac=1.0, kernel='square',\n fillval=\"INDEF\", stepsize=10,wcsmap=None):\n \"\"\"\n Core routine for performing 'drizzle' operation on a single input image\n All input values will be Python objects such as ndarrays, instead\n of filenames.\n File handling (input and output) will be performed by calling routine.\n\n \"\"\"\n # Insure that the fillval parameter gets properly interpreted for use with tdriz\n if util.is_blank(fillval):\n fillval = 'INDEF'\n else:\n fillval = str(fillval)\n\n if in_units == 'cps':\n expscale = 1.0\n else:\n expscale = expin\n\n # Compute what plane of the context image this input would\n # correspond to:\n planeid = int((uniqid-1) / 32)\n\n # Check if the context image has this many planes\n if 
outcon.ndim == 3:\n nplanes = outcon.shape[0]\n elif outcon.ndim == 2:\n nplanes = 1\n else:\n nplanes = 0\n\n if nplanes <= planeid:\n raise IndexError(\"Not enough planes in drizzle context image\")\n\n # Alias context image to the requested plane if 3d\n if outcon.ndim == 2:\n outctx = outcon\n else:\n outctx = outcon[planeid]\n\n pix_ratio = output_wcs.pscale/wcslin_pscale\n\n if wcsmap is None and cdriz is not None:\n log.info('Using WCSLIB-based coordinate transformation...')\n log.info('stepsize = %s' % stepsize)\n mapping = cdriz.DefaultWCSMapping(\n input_wcs, output_wcs,\n input_wcs.pixel_shape[0], input_wcs.pixel_shape[1],\n stepsize\n )\n else:\n #\n ##Using the Python class for the WCS-based transformation\n #\n # Use user provided mapping function\n log.info('Using coordinate transformation defined by user...')\n if wcsmap is None:\n wcsmap = wcs_functions.WCSMap\n wmap = wcsmap(input_wcs,output_wcs)\n mapping = wmap.forward\n\n _shift_fr = 'output'\n _shift_un = 'output'\n ystart = 0\n nmiss = 0\n nskip = 0\n #\n # This call to 'cdriz.tdriz' uses the new C syntax\n #\n _dny = insci.shape[0]\n # Call 'drizzle' to perform image combination\n if insci.dtype > np.float32:\n #WARNING: Input array recast as a float32 array\n insci = insci.astype(np.float32)\n\n _vers,nmiss,nskip = cdriz.tdriz(insci, inwht, outsci, outwht,\n outctx, uniqid, ystart, 1, 1, _dny,\n pix_ratio, 1.0, 1.0, 'center', pixfrac,\n kernel, in_units, expscale, wt_scl,\n fillval, nmiss, nskip, 1, mapping)\n\n if nmiss > 0:\n log.warning('! %s points were outside the output image.' % nmiss)\n if nskip > 0:\n log.debug('! Note, %s input lines were skipped completely.' 
% nskip)\n\n return _vers", "response": "This routine performs the drizzle operation on a single input image and outputs the result image."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _regwrite(shapelist,outfile):\n # This function corrects bugs and provides improvements over the pyregion's\n # ShapeList.write method in the following:\n #\n # 1. ShapeList.write crashes if regions have no comments;\n # 2. ShapeList.write converts 'exclude' (\"-\") regions to normal regions (\"+\");\n # 3. ShapeList.write does not support mixed coordinate systems in a\n # region list.\n #\n # NOTE: This function is provided as a temoprary workaround for the above\n # listed problems of the ShapeList.write. We hope that a future version\n # of pyregion will address all these issues.\n #\n #TODO: Push these changes to pyregion.\n\n if len(shapelist) < 1:\n _print_warning(\"The region list is empty. The region file \\\"%s\\\" \"\\\n \"will be empty.\" % outfile)\n try:\n outf = open(outfile,'w')\n outf.close()\n return\n except IOError as e:\n cmsg = \"Unable to create region file \\'%s\\'.\" % outfile\n if e.args:\n e.args = (e.args[0] + \"\\n\" + cmsg,) + e.args[1:]\n else:\n e.args=(cmsg,)\n raise e\n except:\n raise\n\n prev_cs = shapelist[0].coord_format\n\n outf = None\n try:\n outf = open(outfile,'w')\n\n attr0 = shapelist[0].attr[1]\n defaultline = \" \".join([\"%s=%s\" % (a,attr0[a]) for a in attr0 \\\n if a!='text'])\n\n # first line is globals\n print(\"global\", defaultline, file=outf)\n # second line must be a coordinate format\n print(prev_cs, file=outf)\n\n for shape in shapelist:\n shape_attr = '' if prev_cs == shape.coord_format \\\n else shape.coord_format+\"; \"\n shape_excl = '-' if shape.exclude else ''\n text_coordlist = [\"%f\" % f for f in shape.coord_list]\n shape_coords = \"(\" + \",\".join(text_coordlist) + \")\"\n shape_comment = \" # \" + shape.comment if shape.comment else ''\n\n 
shape_str = shape_attr + shape_excl + shape.name + shape_coords + \\\n shape_comment\n\n print(shape_str, file=outf)\n\n except IOError as e:\n cmsg = \"Unable to create region file \\'%s\\'.\" % outfile\n if e.args:\n e.args = (e.args[0] + \"\\n\" + cmsg,) + e.args[1:]\n else:\n e.args=(cmsg,)\n if outf: outf.close()\n raise e\n except:\n if outf: outf.close()\n raise\n\n outf.close()", "response": "Writes the current shape list out as a region file."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nchecking if the region list contains shapes in image - like coordinates", "response": "def _needs_ref_WCS(reglist):\n \"\"\" Check if the region list contains shapes in image-like coordinates\n \"\"\"\n from pyregion.wcs_helper import image_like_coordformats\n\n for r in reglist:\n if r.coord_format in image_like_coordformats:\n return True\n return False"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef extension_from_filename(filename):\n # Parse out any extension specified in filename\n _indx1 = filename.find('[')\n _indx2 = filename.find(']')\n if _indx1 > 0:\n # check for closing square bracket:\n if _indx2 < _indx1:\n raise RuntimeError(\"Incorrect extension specification in file \" \\\n \"name \\'%s\\'.\" % filename)\n # Read extension name provided\n _fname = filename[:_indx1]\n _extn = filename[_indx1+1:_indx2].strip()\n else:\n _fname = filename\n _extn = None\n\n return _fname, _extn", "response": "Parse out filename from any specified extensions."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the number of extensions in the image.", "response": "def count_extensions(img, extname='SCI'):\n \"\"\" Return the number of 'extname' extensions. 
'img' can be either a file\n name, an HDU List object (from fits), or None (to get the number of all\n HDU headers.\n \"\"\"\n if isinstance(img, str):\n img = fits.open(img, memmap=False)\n img.close()\n elif not isinstance(img, fits.HDUList):\n raise TypeError(\"Argument 'img' must be either a file name (string) \" \\\n \"or a `astropy.io.fits.HDUList` object.\")\n\n if extname is None:\n return len(img)\n\n if not isinstance(extname, str):\n raise TypeError(\"Argument 'extname' must be either a string \" \\\n \"indicating the value of the 'EXTNAME' keyword of the extensions \" \\\n \"to be counted or None to return the count of all HDUs in the \" \\\n \"'img' FITS file.\")\n\n extname = extname.upper()\n\n n = 0\n for e in img:\n #if isinstance(e, fits.ImageHDU): continue\n if 'EXTNAME' in list(map(str.upper, list(e.header.keys()))) \\\n and e.header['extname'].upper() == extname:\n n += 1\n\n return n"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a list of all extension versions of extname extensions.", "response": "def get_extver_list(img, extname='SCI'):\n \"\"\" Return a list of all extension versions of 'extname' extensions.\n 'img' can be either a file name or a HDU List object (from fits).\n \"\"\"\n if isinstance(img, str):\n img = fits.open(img, memmap=False)\n img.close()\n elif not isinstance(img, fits.HDUList):\n raise TypeError(\"Argument 'img' must be either a file name (string) \" \\\n \"or a fits.HDUList object.\")\n\n # when extver is None - return the range of all FITS extensions\n if extname is None:\n extver = list(range(len(img)))\n return extver\n\n if not isinstance(extname, str):\n raise TypeError(\"Argument 'extname' must be either a string \" \\\n \"indicating the value of the 'EXTNAME' keyword of the extensions \" \\\n \"whose versions are to be returned or None to return \" \\\n \"extension numbers of all HDUs in the 'img' FITS file.\")\n\n extname = extname.upper()\n\n extver = []\n for e 
in img:\n #if not isinstance(e, fits.ImageHDU): continue\n hkeys = list(map(str.upper, list(e.header.keys())))\n if 'EXTNAME' in hkeys and e.header['EXTNAME'].upper() == extname:\n extver.append(e.header['EXTVER'] if 'EXTVER' in hkeys else 1)\n\n return extver"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _check_FITS_extvers(img, extname, extvers):\n default_extn = 1 if isinstance(extname, str) else 0\n\n if isinstance(extvers, list):\n extv = [default_extn if ext is None else ext for ext in extvers]\n else:\n extv = [default_extn if extvers is None else extvers]\n\n extv_in_fits = get_extver_list(img, extname)\n\n return set(extv).issubset(set(extv_in_fits))", "response": "Checks if all extensions in the FITS file are present in the image. Returns True if all extensions in the FITS file are present in the image. Returns False if any of the specified extensions are not present in the image FITS file."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ngenerate image and sourcelist filenames for single - exposure products.", "response": "def single_exposure_product_filename_generator(obs_info,nn):\n \"\"\"\n Generate image and sourcelist filenames for single-exposure products\n\n Parameters\n ----------\n obs_info : list\n list of items that will be used to generate the filenames: proposal_id,\n visit_id, instrument, detector, filter, and ipppssoot\n nn : string\n the single-exposure image number (NOTE: only used in\n single_exposure_product_filename_generator())\n\n Returns\n --------\n product_filename_dict : dictionary\n A dictionary containing the generated filenames.\n \"\"\"\n proposal_id = obs_info[0]\n visit_id = obs_info[1]\n instrument = obs_info[2]\n detector = obs_info[3]\n filter = obs_info[4]\n ipppssoot = obs_info[5]\n\n product_filename_dict = {}\n product_filename_dict[\"image\"] = 
\"hst_{}_{}_{}_{}_{}_{}_{}.fits\".format(proposal_id,visit_id,instrument,detector,filter,ipppssoot,nn)\n product_filename_dict[\"source catalog\"]= product_filename_dict[\"image\"].replace(\".fits\",\".cat\")\n\n return(product_filename_dict)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef filter_product_filename_generator(obs_info,nn):\n proposal_id = obs_info[0]\n visit_id = obs_info[1]\n instrument = obs_info[2]\n detector = obs_info[3]\n filter = obs_info[4]\n\n product_filename_dict = {}\n product_filename_dict[\"image\"] = \"hst_{}_{}_{}_{}_{}.fits\".format(proposal_id,visit_id,instrument,detector,filter)\n product_filename_dict[\"source catalog\"] = product_filename_dict[\"image\"].replace(\".fits\",\".cat\")\n\n return(product_filename_dict)", "response": "Generate image and sourcelist filenames for filter products"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ngenerate image and sourcelist filenames for total detection products.", "response": "def total_detection_product_filename_generator(obs_info,nn):\n \"\"\"\n Generate image and sourcelist filenames for total detection products\n\n Parameters\n ----------\n obs_info : list\n list of items that will be used to generate the filenames: proposal_id,\n visit_id, instrument, and detector\n nn : string\n the single-exposure image number (NOTE: only used in\n single_exposure_product_filename_generator())\n\n Returns\n --------\n product_filename_dict : dictionary\n A dictionary containing the generated filenames.\n \"\"\"\n proposal_id = obs_info[0]\n visit_id = obs_info[1]\n instrument = obs_info[2]\n detector = obs_info[3]\n\n product_filename_dict = {}\n product_filename_dict[\"image\"] = \"hst_{}_{}_{}_{}.fits\".format(proposal_id, visit_id, instrument, detector)\n product_filename_dict[\"source catalog\"] = product_filename_dict[\"image\"].replace(\".fits\",\".cat\")\n\n return (product_filename_dict)"} {"SOURCE": "codesearchnet", 
"instruction": "Can you create a Python 3 function that\ngenerates image and sourcelist filenames for multi - visit mosaic products.", "response": "def multivisit_mosaic_product_filename_generator(obs_info,nn):\n \"\"\"\n Generate image and sourcelist filenames for multi-visit mosaic products\n\n Parameters\n ----------\n obs_info : list\n list of items that will be used to generate the filenames: group_id,\n instrument, detector, and filter\n nn : string\n the single-exposure image number (NOTE: only used in\n single_exposure_product_filename_generator())\n\n Returns\n --------\n product_filename_dict : dictionary\n A dictionary containing the generated filenames.\n \"\"\"\n group_num = obs_info[0]\n instrument = obs_info[1]\n detector = obs_info[2]\n filter = obs_info[3]\n\n product_filename_dict = {}\n product_filename_dict[\"image\"] = \"hst_mos_{}_{}_{}_{}.fits\".format(group_num,instrument,detector,filter)\n product_filename_dict[\"source catalog\"] = product_filename_dict[\"image\"].replace(\".fits\",\".cat\")\n\n return (product_filename_dict)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef build_referenceWCS(catalog_list):\n wcslist = []\n for catalog in catalog_list:\n for scichip in catalog.catalogs:\n wcslist.append(catalog.catalogs[scichip]['wcs'])\n return utils.output_wcs(wcslist)", "response": "Build a default reference WCS from a list of Catalog objects."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. 
z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper", "response": "Computes the convex hull of a set of 2D points."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncompute the initial guess for the 2D histogram for each entry in the image and the UV positions.", "response": "def _estimate_2dhist_shift(imgxy, refxy, searchrad=3.0):\n \"\"\" Create a 2D matrix-histogram which contains the delta between each\n XY position and each UV position. Then estimate initial offset\n between catalogs.\n \"\"\"\n print(\"Computing initial guess for X and Y shifts...\")\n\n # create ZP matrix\n zpmat = _xy_2dhist(imgxy, refxy, r=searchrad)\n\n nonzeros = np.count_nonzero(zpmat)\n if nonzeros == 0:\n # no matches within search radius. 
Return (0, 0):\n print(\"WARNING: No matches found within a search radius of {:g} \"\n \"pixels.\".format(searchrad))\n return 0.0, 0.0, 0, 0, zpmat, False\n\n elif nonzeros == 1:\n # only one non-zero bin:\n yp, xp = np.unravel_index(np.argmax(zpmat), zpmat.shape)\n maxval = int(np.ceil(zpmat[yp, xp]))\n xp -= searchrad\n yp -= searchrad\n print(\"Found initial X and Y shifts of {:.4g}, {:.4g} \"\n \"based on a single non-zero bin and {} matches\"\n .format(xp, yp, maxval))\n return xp, yp, maxval, maxval, zpmat, True\n\n (xp, yp), fit_status, fit_sl = _find_peak(zpmat, peak_fit_box=5,\n mask=zpmat > 0)\n\n if fit_status.startswith('ERROR'):\n print(\"WARNING: No valid shift found within a search radius of {:g} \"\n \"pixels.\".format(searchrad))\n maxval = int(np.ceil(zpmat.max()))\n return 0.0, 0.0, maxval, maxval, zpmat, False\n\n xp -= searchrad\n yp -= searchrad\n\n if fit_status == 'WARNING:EDGE':\n print(\n \"WARNING: Found peak in the 2D histogram lies at the edge of \"\n \"the histogram. Try increasing 'searchrad' for improved results.\"\n )\n\n # Attempt to estimate \"significance of detection\":\n maxval = zpmat.max()\n zpmat_mask = (zpmat > 0) & (zpmat < maxval)\n\n if np.any(zpmat_mask):\n bkg = zpmat[zpmat_mask].mean()\n sig = maxval / np.sqrt(bkg)\n\n flux = int(zpmat[fit_sl].sum())\n print(\"Found initial X and Y shifts of {:.4g}, {:.4g} \"\n \"with significance of {:.4g} and {:d} matches\"\n .format(xp, yp, sig, flux))\n\n return xp, yp, int(np.ceil(maxval)), flux, zpmat, True"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nfinds the location of the peak in an array.", "response": "def _find_peak(data, peak_fit_box=5, mask=None):\n \"\"\"\n Find location of the peak in an array. This is done by fitting a second\n degree 2D polynomial to the data within a `peak_fit_box` and computing the\n location of its maximum. 
An initial\n estimate of the position of the maximum will be performed by searching\n for the location of the pixel/array element with the maximum value.\n\n Parameters\n ----------\n data : numpy.ndarray\n 2D data.\n\n peak_fit_box : int, optional\n Size (in pixels) of the box around the initial estimate of the maximum\n to be used for quadratic fitting from which peak location is computed.\n It is assumed that fitting box is a square with sides of length\n given by ``peak_fit_box``.\n\n mask : numpy.ndarray, optional\n A boolean type `~numpy.ndarray` indicating \"good\" pixels in image data\n (`True`) and \"bad\" pixels (`False`). If not provided all pixels\n in `image_data` will be used for fitting.\n\n Returns\n -------\n coord : tuple of float\n A pair of coordinates of the peak.\n\n fit_status : str\n Status of the peak search. Currently the following values can be\n returned:\n\n - ``'SUCCESS'``: Fit was successful and peak is not on the edge of\n the input array;\n - ``'ERROR:NODATA'``: Not enough valid data to perform the fit; The\n returned coordinate is the center of input array;\n - ``'WARNING:EDGE'``: Peak lies on the edge of the input array.\n Returned coordinates are the result of a discreet search;\n - ``'WARNING:BADFIT'``: Performed fid did not find a maximum. Returned\n coordinates are the result of a discreet search;\n - ``'WARNING:CENTER-OF-MASS'``: Returned coordinates are the result\n of a center-of-mass estimate instead of a polynomial fit. 
This is\n either due to too few points to perform a fit or due to a\n failure of the polynomial fit.\n\n fit_box : a tuple of two tuples\n A tuple of two tuples of the form ``((x1, x2), (y1, y2))`` that\n indicates pixel ranges used for fitting (these indices can be used\n directly for slicing input data)\n\n \"\"\"\n # check arguments:\n data = np.asarray(data, dtype=np.float64)\n ny, nx = data.shape\n\n # find index of the pixel having maximum value:\n if mask is None:\n jmax, imax = np.unravel_index(np.argmax(data), data.shape)\n coord = (float(imax), float(jmax))\n\n else:\n j, i = np.indices(data.shape)\n i = i[mask]\n j = j[mask]\n\n if i.size == 0:\n # no valid data:\n coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)\n return coord, 'ERROR:NODATA', np.s_[0:ny-1, 0:nx-1]\n\n ind = np.argmax(data[mask])\n imax = i[ind]\n jmax = j[ind]\n coord = (float(imax), float(jmax))\n\n if data[jmax, imax] < 1:\n # no valid data: we need some counts in the histogram bins\n coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)\n return coord, 'ERROR:NODATA', np.s_[0:ny-1, 0:nx-1]\n\n # choose a box around maxval pixel:\n x1 = max(0, imax - peak_fit_box // 2)\n x2 = min(nx, x1 + peak_fit_box)\n y1 = max(0, jmax - peak_fit_box // 2)\n y2 = min(ny, y1 + peak_fit_box)\n\n # if peak is at the edge of the box, return integer indices of the max:\n if imax == x1 or imax == x2 or jmax == y1 or jmax == y2:\n return (float(imax), float(jmax)), 'WARNING:EDGE', np.s_[y1:y2, x1:x2]\n\n # expand the box if needed:\n if (x2 - x1) < peak_fit_box:\n if x1 == 0:\n x2 = min(nx, x1 + peak_fit_box)\n if x2 == nx:\n x1 = max(0, x2 - peak_fit_box)\n\n if (y2 - y1) < peak_fit_box:\n if y1 == 0:\n y2 = min(ny, y1 + peak_fit_box)\n if y2 == ny:\n y1 = max(0, y2 - peak_fit_box)\n\n if x2 - x1 == 0 or y2 - y1 == 0:\n # not enough data:\n coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)\n return coord, 'ERROR:NODATA', np.s_[y1:y2, x1:x2]\n\n # fit a 2D 2nd degree polynomial to data:\n xi = np.arange(x1, x2)\n yi = 
np.arange(y1, y2)\n x, y = np.meshgrid(xi, yi)\n x = x.ravel()\n y = y.ravel()\n v = np.vstack((np.ones_like(x), x, y, x*y, x*x, y*y)).T\n d = data[y1:y2, x1:x2].ravel()\n if mask is not None:\n m = mask[y1:y2, x1:x2].ravel()\n v = v[m]\n d = d[m]\n\n if d.size == 0 or np.max(d) <= 0:\n coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)\n return coord, 'ERROR:NODATA', np.s_[y1:y2, x1:x2]\n\n if d.size < 6:\n # we need at least 6 points to fit a 2D quadratic polynomial\n # attempt center-of-mass instead:\n dt = d.sum()\n xc = np.dot(v[:, 1], d) / dt\n yc = np.dot(v[:, 2], d) / dt\n return (xc, yc), 'WARNING:CENTER-OF-MASS', np.s_[y1:y2, x1:x2]\n\n try:\n c = np.linalg.lstsq(v, d, rcond=None)[0]\n except np.linalg.LinAlgError:\n print(\"WARNING: Least squares failed!\\n{}\".format(c))\n\n # attempt center-of-mass instead:\n dt = d.sum()\n xc = np.dot(v[:, 1], d) / dt\n yc = np.dot(v[:, 2], d) / dt\n return (xc, yc), 'WARNING:CENTER-OF-MASS', np.s_[y1:y2, x1:x2]\n\n # find maximum of the polynomial:\n _, c10, c01, c11, c20, c02 = c\n det = 4 * c02 * c20 - c11**2\n if det <= 0 or ((c20 > 0.0 and c02 >= 0.0) or (c20 >= 0.0 and c02 > 0.0)):\n # polynomial does not have max. 
return maximum value in the data:\n return coord, 'WARNING:BADFIT', np.s_[y1:y2, x1:x2]\n\n xm = (c01 * c11 - 2.0 * c02 * c10) / det\n ym = (c10 * c11 - 2.0 * c01 * c20) / det\n\n if 0.0 <= xm <= (nx - 1.0) and 0.0 <= ym <= (ny - 1.0):\n coord = (xm, ym)\n fit_status = 'SUCCESS'\n\n else:\n xm = 0.0 if xm < 0.0 else min(xm, nx - 1.0)\n ym = 0.0 if ym < 0.0 else min(ym, ny - 1.0)\n fit_status = 'WARNING:EDGE'\n\n return coord, fit_status, np.s_[y1:y2, x1:x2]"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef openFile(self, openDQ=False):\n\n if self._im.closed:\n if not self._dq.closed:\n self._dq.release()\n assert(self._dq.closed)\n fi = FileExtMaskInfo(clobber=False,\n doNotOpenDQ=not openDQ,\n im_fmode=self.open_mode)\n fi.image = self.name\n self._im = fi.image\n fi.append_ext(spu.get_ext_list(self._im, extname='SCI'))\n fi.finalize()\n self._im = fi.image\n self._dq = fi.DQimage\n self._imext = fi.fext\n self._dqext = fi.dqext", "response": "Open file and set up filehandle for image file"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn a list of all the input WCS objects associated with this image.", "response": "def get_wcs(self):\n \"\"\" Helper method to return a list of all the input WCS objects associated\n with this image.\n \"\"\"\n wcslist = []\n for chip in self.chip_catalogs:\n wcslist.append(self.chip_catalogs[chip]['wcs'])\n return wcslist"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef buildSkyCatalog(self):\n self.all_radec = None\n self.all_radec_orig = None\n ralist = []\n declist = []\n fluxlist = []\n idlist = []\n for scichip in self.chip_catalogs:\n skycat = self.chip_catalogs[scichip]['catalog'].radec\n xycat = self.chip_catalogs[scichip]['catalog'].xypos\n if skycat is not None:\n ralist.append(skycat[0])\n declist.append(skycat[1])\n if xycat is not None and len(xycat) > 
2:\n fluxlist.append(xycat[2])\n idlist.append(xycat[3])\n elif len(skycat) > 2:\n fluxlist.append(skycat[2])\n idlist.append(skycat[3])\n else:\n fluxlist.append([999.0]*len(skycat[0]))\n idlist.append(np.arange(len(skycat[0])))\n\n self.all_radec = [np.concatenate(ralist),np.concatenate(declist),\n np.concatenate(fluxlist),np.concatenate(idlist)]\n self.all_radec_orig = copy.deepcopy(self.all_radec)", "response": "Builds a single sky catalog for all chips."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef buildDefaultRefWCS(self):\n self.default_refWCS = None\n if self.use_wcs:\n wcslist = []\n for scichip in self.chip_catalogs:\n wcslist.append(self.chip_catalogs[scichip]['wcs'])\n self.default_refWCS = utils.output_wcs(wcslist)", "response": "Generates a default reference WCS for this image."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef transformToRef(self,ref_wcs,force=False):\n if not isinstance(ref_wcs, pywcs.WCS):\n print(textutil.textbox('Reference WCS not a valid HSTWCS object'),\n file=sys.stderr)\n raise ValueError\n # Need to concatenate catalogs from each input\n if self.outxy is None or force:\n outxy = ref_wcs.wcs_world2pix(self.all_radec[0],self.all_radec[1],self.origin)\n # convert outxy list to a Nx2 array\n self.outxy = np.column_stack([outxy[0][:,np.newaxis],outxy[1][:,np.newaxis]])\n if self.pars['writecat']:\n catname = self.rootname+\"_refxy_catalog.coo\"\n self.write_outxy(catname)\n self.catalog_names['ref_xy'] = catname", "response": "Transform sky coords from ALL chips into X Y coords in reference WCS."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nsorts and clip the source catalog based on the flux range specified by the user.", "response": "def sortSkyCatalog(self):\n \"\"\" Sort and clip the source catalog based on the flux range specified\n by the user. 
It keeps a copy of the original full list in order to\n support iteration.\n\n \"\"\"\n if len(self.all_radec_orig[2].nonzero()[0]) == 0:\n warn_str = \"Source catalog NOT trimmed by flux/mag. No fluxes read in for sources!\"\n print('\\nWARNING: ',warn_str,'\\n')\n log.warning(warn_str)\n return\n clip_catalog = False\n clip_prefix = ''\n for k in sortKeys:\n for p in self.pars.keys():\n pindx = p.find(k)\n if pindx >= 0 and self.pars[p] is not None:\n log.info('found a match for %s to %s'%(\n str(p),str(self.pars[p])))\n # find prefix (if any)\n clip_prefix = p[:pindx].strip()\n #Only clip the catalog if one of the keys is specified\n # in the catalog parameters, not the source finding pars\n if clip_prefix and 'units' not in p:\n clip_catalog = True\n break\n if clip_catalog:\n break\n all_radec = None\n\n if clip_catalog:\n\n # Start by clipping by any specified flux range\n if self.pars[clip_prefix+'maxflux'] is not None or \\\n self.pars[clip_prefix+'minflux'] is not None:\n clip_catalog = True\n if self.pars[clip_prefix+'minflux'] is not None:\n fluxmin = self.pars[clip_prefix+'minflux']\n else:\n fluxmin = self.all_radec[2].min()\n\n if self.pars[clip_prefix+'maxflux'] is not None:\n fluxmax = self.pars[clip_prefix+'maxflux']\n else:\n fluxmax = self.all_radec[2].max()\n\n # apply flux limit clipping\n minindx = self.all_radec_orig[2] >= fluxmin\n maxindx = self.all_radec_orig[2] <= fluxmax\n flux_indx = np.bitwise_and(minindx,maxindx)\n all_radec = []\n all_radec.append(self.all_radec_orig[0][flux_indx])\n all_radec.append(self.all_radec_orig[1][flux_indx])\n all_radec.append(self.all_radec_orig[2][flux_indx])\n all_radec.append(np.arange(len(self.all_radec_orig[0][flux_indx])))\n\n if clip_prefix+'nbright' in self.pars and \\\n self.pars[clip_prefix+'nbright'] is not None:\n clip_catalog = True\n\n nbright = self.pars[clip_prefix+'nbright']\n\n # pick out only the brightest 'nbright' sources\n if self.pars[clip_prefix+'fluxunits'] == 'mag':\n nbslice = 
slice(None,nbright)\n else:\n nbslice = slice(nbright,None)\n\n if all_radec is None:\n # work on copy of all original data\n all_radec = copy.deepcopy(self.all_radec_orig)\n # find indices of brightest\n nbright_indx = np.argsort(all_radec[2])[nbslice]\n self.all_radec[0] = all_radec[0][nbright_indx]\n self.all_radec[1] = all_radec[1][nbright_indx]\n self.all_radec[2] = all_radec[2][nbright_indx]\n self.all_radec[3] = np.arange(len(all_radec[0][nbright_indx]))\n\n else:\n if all_radec is not None:\n self.all_radec = copy.deepcopy(all_radec)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef match(self,refimage, quiet_identity, **kwargs):\n ref_outxy = refimage.outxy\n refWCS = refimage.wcs\n refname = refimage.name\n ref_inxy = refimage.xy_catalog\n\n cat_src_type = kwargs['cat_src_type']\n del kwargs['cat_src_type']\n\n if not quiet_identity:\n print(\"Matching sources from \\'{}\\' with sources from \"\n \"reference {} \\'{}\\'\"\n .format(self.name, cat_src_type, refname))\n #self.sortSkyCatalog() # apply any catalog sorting specified by the user\n self.transformToRef(refWCS)\n self.refWCS = refWCS\n # extract xyxymatch parameters from input parameters\n matchpars = kwargs.copy()\n self.match_pars = matchpars\n minobj = matchpars['minobj'] # needed for later\n del matchpars['minobj'] # not needed in xyxymatch\n\n self.goodmatch = True\n\n # Check to see whether or not it is being matched to itself\n if refname.strip() == self.name.strip():\n self.identityfit = True\n if not quiet_identity:\n log.info('NO fit performed for reference image: %s\\n'%self.name)\n else:\n # convert tolerance from units of arcseconds to pixels, as needed\n radius = matchpars['searchrad']\n if matchpars['searchunits'] == 'arcseconds':\n radius /= refWCS.pscale\n\n # Determine xyoff (X,Y offset) and tolerance to be used with xyxymatch\n if matchpars['use2dhist']:\n xsh, ysh, maxval, flux, zpmat, qual = _estimate_2dhist_shift(\n self.outxy,\n 
ref_outxy,\n searchrad=radius\n )\n xyoff = (xsh, ysh)\n\n if matchpars['see2dplot']:\n zpstd = max(10, flux // 5) if qual else 10\n title_str = (\"Histogram of offsets: Peak has {:d} matches \"\n \"at ({:0.4g}, {:0.4g})\"\n .format(maxval, xsh, ysh))\n hist_name = None\n if not self.interactive:\n hist_name = 'hist2d_{0}.png'.format(self.rootname)\n\n plot_pars = {\n 'data': zpmat,\n 'figure_id': self.figure_id,\n 'vmax': zpstd,\n 'xp': xsh,\n 'yp': ysh,\n 'searchrad': radius,\n 'title_str': title_str,\n 'plotname': hist_name,\n 'interactive': self.interactive\n }\n\n tweakutils.plot_zeropoint(plot_pars)\n\n if matchpars['see2dplot'] and ('residplot' in matchpars and\n 'No' in matchpars['residplot']):\n if self.interactive:\n prompt = (\"Press ENTER for next image, \\n\" +\n \" 'n' to continue without updating header or \\n\" +\n \" 'q' to quit immediately...\\n\")\n\n if sys.version_info[0] >= 3:\n a = input(prompt)\n else:\n a = raw_input(prompt)\n\n else:\n a = ' '\n\n if 'n' in a.lower():\n self.perform_update = False\n\n if 'q' in a.lower():\n self.quit_immediately = True\n\n if matchpars['see2dplot']:\n self.figure_id += 1\n\n else:\n xoff = 0.\n yoff = 0.\n if not util.is_blank(matchpars['xoffset']):\n xoff = matchpars['xoffset']\n if not util.is_blank(matchpars['yoffset']):\n yoff = matchpars['yoffset']\n xyoff = (xoff, yoff)\n\n matches = xyxymatch(self.outxy, ref_outxy, origin=xyoff,\n tolerance=matchpars['tolerance'],\n separation=matchpars['separation'])\n\n if len(matches) > minobj:\n self.matches['image'] = np.column_stack([matches['input_x'][:,\n np.newaxis],matches['input_y'][:,np.newaxis]])\n self.matches['ref'] = np.column_stack([matches['ref_x'][:,\n np.newaxis],matches['ref_y'][:,np.newaxis]])\n self.matches['ref_idx'] = matches['ref_idx']\n self.matches['img_idx'] = self.all_radec[3][matches['input_idx']]\n self.matches['input_idx'] = matches['input_idx']\n self.matches['img_RA'] = self.all_radec[0][matches['input_idx']]\n 
self.matches['img_DEC'] = self.all_radec[1][matches['input_idx']]\n\n self.matches['ref_orig_xy'] = np.column_stack([\n np.array(ref_inxy[0])[matches['ref_idx']][:,np.newaxis],\n np.array(ref_inxy[1])[matches['ref_idx']][:,np.newaxis]])\n\n self.matches['img_orig_xy'] = np.column_stack([\n np.array(self.xy_catalog[0])[matches['input_idx']][:,np.newaxis],\n np.array(self.xy_catalog[1])[matches['input_idx']][:,np.newaxis]])\n self.matches['src_origin'] = ref_inxy[-1][matches['ref_idx']]\n print('Found %d matches for %s...'%(len(matches),self.name))\n\n if self.pars['writecat']:\n matchfile = open(self.catalog_names['match'],mode='w+')\n matchfile.write('#Reference: %s\\n'%refname)\n matchfile.write('#Input: %s\\n'%self.name)\n title = '#Ref_X Ref_Y '\n title += 'Input_X Input_Y '\n title += 'Ref_X0 Ref_Y0 '\n title += 'Input_X0 Input_Y0 '\n title += 'Ref_ID Input_ID '\n title += 'Ref_Source\\n'\n fmtstr = '%0.6f %0.6f '*4\n fmtstr += '%d %d %s\\n'\n matchfile.write(title)\n for i in range(len(matches['input_x'])):\n linestr = fmtstr%\\\n (matches['ref_x'][i],matches['ref_y'][i],\\\n matches['input_x'][i],matches['input_y'][i],\n self.matches['ref_orig_xy'][:,0][i],\n self.matches['ref_orig_xy'][:,1][i],\n self.matches['img_orig_xy'][:,0][i],\n self.matches['img_orig_xy'][:,1][i],\n matches['ref_idx'][i],matches['input_idx'][i],\n self.matches['src_origin'][i])\n matchfile.write(linestr)\n matchfile.close()\n else:\n warnstr = textutil.textbox('WARNING: \\n'+\n 'Not enough matches (< %d) found for input image: %s'%(minobj,self.name))\n for line in warnstr.split('\\n'):\n log.warning(line)\n print(warnstr)\n self.goodmatch = False", "response": "Matches this image with a reference image."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nperform a fit between the matched sources.", "response": "def performFit(self,**kwargs):\n \"\"\" Perform a fit between the matched sources.\n\n Parameters\n ----------\n kwargs : dict\n Parameter 
necessary to perform the fit; namely, *fitgeometry*.\n\n Notes\n -----\n This task still needs to implement (eventually) interactive iteration of\n the fit to remove outliers.\n \"\"\"\n assert(self.refWCS is not None)\n pars = kwargs.copy()\n self.fit_pars = pars\n\n self.fit = {'offset':[0.0,0.0],'rot':0.0,'scale':[1.0],'rms':[0.0,0.0],\n 'rms_keys':{'RMS_RA':0.0,'RMS_DEC':0.0,'NMATCH':0},\n 'fit_matrix':[[1.0,0.0],[0.0,1.0]], 'src_origin':[None]}\n\n if not self.identityfit:\n if self.matches is not None and self.goodmatch:\n self.fit = linearfit.iter_fit_all(\n self.matches['image'],self.matches['ref'],\n self.matches['img_idx'],self.matches['ref_idx'],\n xyorig=self.matches['img_orig_xy'],\n uvorig=self.matches['ref_orig_xy'],\n mode=pars['fitgeometry'],nclip=pars['nclip'],\n sigma=pars['sigma'],minobj=pars['minobj'],\n center=self.refWCS.wcs.crpix,\n verbose=self.verbose)\n\n self.fit['rms_keys'] = self.compute_fit_rms()\n radec_fit = self.refWCS.all_pix2world(self.fit['fit_xy'],1)\n self.fit['fit_RA'] = radec_fit[:,0]\n self.fit['fit_DEC'] = radec_fit[:,1]\n self.fit['src_origin'] = self.matches['src_origin']\n\n print('Computed ',pars['fitgeometry'],' fit for ',self.name,': ')\n if pars['fitgeometry'] == 'shift':\n print(\"XSH: {:.4f} YSH: {:.4f}\"\n .format(self.fit['offset'][0],\n self.fit['offset'][1]))\n elif pars['fitgeometry'] == 'rscale' and self.fit['proper']:\n print(\"XSH: {:.4f} YSH: {:.4f} ROT: {:.10g} \"\n \"SCALE: {:.6f}\".format(\n self.fit['offset'][0],\n self.fit['offset'][1],\n self.fit['rot'],\n self.fit['scale'][0]))\n elif pars['fitgeometry'] == 'general' or \\\n (pars['fitgeometry'] == 'rscale' and not self.fit['proper']):\n print(\"XSH: {:.4f} YSH: {:.4f} PROPER ROT: {:.10g} \"\n \"\".format(\n self.fit['offset'][0],\n self.fit['offset'][1],\n self.fit['rot']))\n print(\": {:.10g} SKEW: {:.10g} ROT_X: {:.10g} \"\n \"ROT_Y: {:.10g}\".format(\n self.fit['rotxy'][2],\n self.fit['skew'],\n self.fit['rotxy'][0],\n self.fit['rotxy'][1]))\n 
print(\": {:.10g} SCALE_X: {:.10g} \"\n \"SCALE_Y: {:.10g}\".format(\n self.fit['scale'][0],\n self.fit['scale'][1],\n self.fit['scale'][2]))\n else:\n assert(False)\n\n print('FIT XRMS: {:<7.2g} FIT YRMS: {:<7.2g}'\n .format(*self.fit['rms']))\n print('FIT RMSE: {:<7.2g} FIT MAE: {:<7.2g}\\n'\n .format(self.fit['rmse'], self.fit['mae']))\n print('RMS_RA: %.2g (deg) RMS_DEC: %.2g (deg)\\n'%(\n self.fit['rms_keys']['RMS_RA'],\n self.fit['rms_keys']['RMS_DEC']))\n print('Final solution based on ',self.fit['rms_keys']['NMATCH'],' objects.')\n\n self.write_fit_catalog()\n\n # Plot residuals, if requested by the user\n if 'residplot' in pars and \"No\" not in pars['residplot']:\n xy = self.fit['img_coords']\n resids = self.fit['resids']\n xy_fit = xy + resids\n title_str = 'Residuals\\ for\\ {0}\\ using\\ {1:6d}\\ sources'.format(\n self.name.replace('_','\\_'),self.fit['rms_keys']['NMATCH'])\n if not self.interactive:\n resid_name = 'residuals_{0}.png'.format(self.rootname)\n vector_name = resid_name.replace('residuals','vector')\n else:\n resid_name = None\n vector_name = None\n if pars['residplot'] == 'both':\n tweakutils.make_vector_plot(None,\n data=[xy[:,0],xy[:,1],xy_fit[:,0],xy_fit[:,1]],\n figure_id=self.figure_id, vector=True,\n labelsize=pars['labelsize'],\n plotname=vector_name, title=title_str)\n ptype=False # Setup\n self.figure_id += 1\n elif pars['residplot'] == 'vector':\n ptype = True\n else:\n ptype = False\n\n # Generate new plot\n tweakutils.make_vector_plot(None,\n data=[xy[:,0],xy[:,1],xy_fit[:,0],xy_fit[:,1]],\n figure_id=self.figure_id, vector=ptype,\n ylimit=pars['ylimit'], labelsize=pars['labelsize'],\n plotname=resid_name, title=title_str)\n if self.interactive:\n prompt = (\"Press ENTER for next image, \\n\" +\n \" 'n' to continue without updating header or \\n\" +\n \" 'q' to quit immediately...\\n\")\n if sys.version_info[0] >= 3:\n a = input(prompt)\n else:\n a = raw_input(prompt)\n else:\n a = ' '\n if 'n' in a.lower():\n 
self.perform_update = False\n if 'q' in a.lower():\n self.quit_immediately = True\n else:\n self.fit['offset'] = [np.nan,np.nan]\n self.fit['rot'] = np.nan\n self.fit['scale'] = [np.nan]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef updateHeader(self, wcsname=None, reusename=False):\n # Insure filehandle is open and available...\n self.openFile()\n\n verbose_level = 1\n if not self.perform_update:\n verbose_level = 0\n # Create WCSCORR table to keep track of WCS revisions anyway\n if self.perform_update:\n wcscorr.init_wcscorr(self._im.hdu)\n\n extlist = []\n wcscorr_extname = self.ext_name\n if self.ext_name == \"PRIMARY\":\n extlist = [0]\n else:\n for ext in range(1,self.nvers+1):\n extlist.append((self.ext_name,ext))\n # add WCSNAME to SCI headers, if not provided (such as for\n # drizzled images directly obtained from the archive pre-AD)\n if ('wcsname' not in self._im.hdu[self.ext_name,ext].header and\n self._im.hdu.fileinfo(0)['filemode'] == 'update'):\n self._im.hdu[self.ext_name,ext].header['wcsname'] = 'Default'\n\n if not self.identityfit and self.goodmatch and \\\n self.fit['offset'][0] != np.nan:\n updatehdr.updatewcs_with_shift(self._im.hdu, self.refWCS,\n wcsname=wcsname, reusename=reusename,\n fitgeom=self.fit_pars['fitgeometry'],\n xsh=self.fit['offset'][0],ysh=self.fit['offset'][1],\n rot=self.fit['rot'],scale=self.fit['scale'][0],\n fit=self.fit['fit_matrix'], verbose=verbose_level,\n xrms=self.fit['rms_keys']['RMS_RA'],\n yrms=self.fit['rms_keys']['RMS_DEC'])\n\n wnames = altwcs.wcsnames(self._im.hdu,ext=extlist[0])\n\n altkeys = []\n for k in wnames:\n if wnames[k] == wcsname:\n altkeys.append(k)\n if len(altkeys) > 1 and ' ' in altkeys:\n altkeys.remove(' ')\n if len(altkeys) == 0:\n next_key = ' '\n else:\n next_key = altkeys[-1]\n if self.perform_update:\n log.info(' Writing out new WCS to alternate WCS: \"%s\"'%next_key)\n\n self.next_key = next_key\n else: #if 
self.identityfit or not self.goodmatch:\n if reusename:\n # Look for key of WCS with this name\n next_key = altwcs.getKeyFromName(self._im.hdu[extlist[0]].header,wcsname)\n # This wcsname is new, so start fresh\n if next_key is None:\n next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)\n else:\n # Find key for next WCS and save again to replicate an updated solution\n next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)\n\n if self.perform_update:\n # archive current WCS as alternate WCS with specified WCSNAME\n # Start by archiving original PRIMARY WCS\n wnames = altwcs.wcsnames(self._im.hdu,ext=extlist[0])\n\n # Define a default WCSNAME in the case that the file to be\n # updated did not have the WCSNAME keyword defined already\n # (as will happen when updating images that have not been\n # updated using updatewcs).\n if len(wnames) == 0:\n pri_wcsname = None\n else:\n # Safeguard against headers not having WCSNAME defined\n # This would occur if they were written out by something\n # other than stwcs.updatewcs v\n if ' ' not in wnames:\n self._im.hdu[extlist[0]].header['wscname'] = ''\n wnames[' '] = ''\n pri_wcsname = wnames[' ']\n\n next_pkey = altwcs.getKeyFromName(fits.getheader(self.name, extlist[0], memmap=False),pri_wcsname)\n log.info(' Saving Primary WCS to alternate WCS: \"%s\"'%next_pkey)\n\n altwcs.archiveWCS(self._im.hdu, extlist,\n wcskey=next_pkey, wcsname=pri_wcsname,\n reusekey=True)\n if reusename:\n # Look for key of WCS with this name\n next_key = altwcs.getKeyFromName(self._im.hdu[extlist[0]].header,wcsname)\n # This wcsname is new, so start fresh\n if next_key is None:\n next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)\n else:\n # Find key for next WCS and save again to replicate an updated solution\n next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)\n # update WCSNAME to be the new name\n for ext in extlist:\n self._im.hdu[ext].header['WCSNAME'] = wcsname\n\n # save again using new WCSNAME\n 
altwcs.archiveWCS(self._im.hdu, extlist,\n wcskey=next_key,wcsname=wcsname, reusekey=reusename)\n self.next_key = ' '\n\n # add FIT values to image's PRIMARY header\n fimg = self._im.hdu\n\n if wcsname in ['',' ',None,\"INDEF\"]:\n wcsname = 'TWEAK'\n # Record values for the fit with both the PRIMARY WCS being updated\n # and the alternate WCS which will be created.\n assert(not self._im.closed)\n\n for ext in extlist:\n self._im.hdu[ext].header['FITNAME'+next_key] = wcsname\n for kw in self.fit['rms_keys']:\n self._im.hdu[ext].header.set(kw+next_key,\n self.fit['rms_keys'][kw],\n after='FITNAME'+next_key)\n\n if self.perform_update:\n log.info('Updating WCSCORR table with new WCS solution \"%s\"'%wcsname)\n wcscorr.update_wcscorr(self._im.hdu, wcs_id=wcsname,\n extname=self.ext_name)", "response": "Update the header of the image with shifts computed by perform_fit."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef writeHeaderlet(self,**kwargs):\n # Insure filehandle is open and available...\n self.openFile()\n\n pars = kwargs.copy()\n rms_pars = self.fit['rms_keys']\n\n str_kw = ['descrip','history','author','hdrfile']\n for kw in str_kw:\n if pars[kw] == '': pars[kw] = None\n\n # Call function with properly interpreted input parameters\n # Syntax: write_headerlet(filename, hdrname, output, sciext='SCI',\n # wcsname=None, wcskey=None, destim=None,\n # sipname=None, npolfile=None, d2imfile=None,\n # author=None, descrip=None, history=None,\n # rms_ra=None, rms_dec=None, nmatch=None, catalog=None,\n # attach=True, clobber=False):\n headerlet.write_headerlet(self._im.hdu, pars['hdrname'],\n output=pars['hdrfile'],\n wcsname=None, wcskey=self.next_key, destim=None,\n sipname=None, npolfile=None, d2imfile=None,\n author=pars['author'], descrip=pars['descrip'],\n history=pars['history'],\n nmatch=rms_pars['NMATCH'],catalog=pars['catalog'],\n attach=pars['attach'], clobber=pars['clobber']\n )", "response": "Write a headerlet 
based on update to PRIMARY WCS\n "} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef write_skycatalog(self,filename):\n if self.all_radec is None:\n return\n ralist = self.all_radec[0]#.tolist()\n declist = self.all_radec[1]#.tolist()\n f = open(filename,'w')\n f.write(\"#Sky positions for: \"+self.name+'\\n')\n f.write(\"#RA Dec\\n\")\n f.write(\"#(deg) (deg)\\n\")\n for i in range(len(ralist)):\n f.write('%0.12f %0.12f\\n'%(ralist[i],declist[i]))\n f.close()", "response": "Write out the all_radec catalog for this image to a file."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns a string with the names of input_xy catalog names", "response": "def get_xy_catnames(self):\n \"\"\" Return a string with the names of input_xy catalog names\n \"\"\"\n catstr = self.name+' '\n if 'input_xy' in self.catalog_names:\n for xycat in self.catalog_names['input_xy']:\n catstr += ' '+xycat\n return catstr + '\\n'"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nwrite out the catalog of all sources and resids used in the final fit.", "response": "def write_fit_catalog(self):\n \"\"\" Write out the catalog of all sources and resids used in the final fit.\n \"\"\"\n if self.pars['writecat']:\n log.info('Creating catalog for the fit: {:s}'.format(self.catalog_names['fitmatch']))\n f = open(self.catalog_names['fitmatch'],'w')\n f.write('# Input image: {:s}\\n'.format(self.filename))\n f.write('# Coordinate mapping parameters: \\n')\n f.write('# X and Y rms: {:.2g} {:.2g}\\n'\n .format(self.fit['rms'][0], self.fit['rms'][1]))\n f.write('# X and Y shift: {:.4f} {:.4f}\\n'\n .format(self.fit['offset'][0], self.fit['offset'][1]))\n f.write('# X and Y scale: {:.10g} {:.10g}\\n'\n .format(self.fit['scale'][1], self.fit['scale'][2]))\n f.write('# X and Y rotation: {:.10g} {:.10g}\\n'\n .format(self.fit['rotxy'][0], self.fit['rotxy'][1]))\n 
f.write('# and skew: {:.10g} {:.10g}\\n'\n .format(self.fit['rotxy'][2], self.fit['skew']))\n\n f.write('# \\n# Input Coordinate Listing\\n')\n f.write('# Column 1: X (reference)\\n')\n f.write('# Column 2: Y (reference)\\n')\n f.write('# Column 3: X (input)\\n')\n f.write('# Column 4: Y (input)\\n')\n f.write('# Column 5: X (fit)\\n')\n f.write('# Column 6: Y (fit)\\n')\n f.write('# Column 7: X (residual)\\n')\n f.write('# Column 8: Y (residual)\\n')\n f.write('# Column 9: Original X (reference)\\n')\n f.write('# Column 10: Original Y (reference)\\n')\n f.write('# Column 11: Original X (input)\\n')\n f.write('# Column 12: Original Y (input)\\n')\n f.write('# Column 13: Ref ID\\n')\n f.write('# Column 14: Input ID\\n')\n f.write('# Column 15: Input EXTVER ID \\n')\n f.write('# Column 16: RA (fit)\\n')\n f.write('# Column 17: Dec (fit)\\n')\n f.write('# Column 18: Ref source provenience\\n')\n #\n # Need to add chip ID for each matched source to the fitmatch file\n # The chip information can be extracted from the following source:\n #\n # self.chip_catalogs[sci_extn] = {'catalog':catalog,'wcs':wcs}\n # xypos = catalog.xypos\n img_chip_id = self.fit['img_indx'].copy()\n for sci_extn in range(1,self.nvers+1):\n catalog = self.chip_catalogs[sci_extn]['catalog']\n if catalog.xypos is not None:\n img_indx_orig = self.chip_catalogs[sci_extn]['catalog'].xypos[3]\n chip_min = img_indx_orig.min()\n chip_max = img_indx_orig.max()\n cid = np.logical_and((img_chip_id >= chip_min),(img_chip_id <= chip_max))\n img_chip_id[cid] = sci_extn\n #\n f.write('#\\n')\n f.close()\n\n xydata = [[self.fit['ref_coords'][:,0],self.fit['ref_coords'][:,1],\n self.fit['img_coords'][:,0],self.fit['img_coords'][:,1],\n self.fit['fit_xy'][:,0],self.fit['fit_xy'][:,1],\n self.fit['resids'][:,0],self.fit['resids'][:,1],\n self.fit['ref_orig_xy'][:,0],\n self.fit['ref_orig_xy'][:,1],\n self.fit['img_orig_xy'][:,0],\n self.fit['img_orig_xy'][:,1]],\n 
[self.fit['ref_indx'],self.fit['img_indx'],img_chip_id],\n [self.fit['fit_RA'],self.fit['fit_DEC']],\n [self.fit['src_origin']]\n ]\n\n tweakutils.write_xy_file(self.catalog_names['fitmatch'],xydata,\n append=True,format=[\"%15.6f\",\"%8d\",\"%20.12f\",\" %s\"])"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef write_outxy(self,filename):\n f = open(filename,'w')\n f.write(\"#Pixel positions for: \"+self.name+'\\n')\n f.write(\"#X Y\\n\")\n f.write(\"#(pix) (pix)\\n\")\n for i in range(self.all_radec[0].shape[0]):\n f.write('%f %f\\n'%(self.outxy[i,0],self.outxy[i,1]))\n f.close()", "response": "Write out the output XY catalog for this image to a file."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the information for a shiftfile for this image to provide the MultiDrizzle - based MultiDrizzle compatability with the IRAF - based MultiDrizzle.", "response": "def get_shiftfile_row(self):\n \"\"\" Return the information for a shiftfile for this image to provide\n compatability with the IRAF-based MultiDrizzle.\n \"\"\"\n if self.fit is not None:\n rowstr = '%s %0.6f %0.6f %0.6f %0.6f %0.6f %0.6f\\n'%(\n self.name,self.fit['offset'][0],self.fit['offset'][1],\n self.fit['rot'],self.fit['scale'][0],\n self.fit['rms'][0],self.fit['rms'][1])\n else:\n rowstr = None\n return rowstr"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef clean(self):\n #TODO: add cleaning of mask files, *if* created ...\n for f in self.catalog_names:\n if 'match' in f:\n if os.path.exists(self.catalog_names[f]):\n log.info('Deleting intermediate match file: %s'%\n self.catalog_names[f])\n os.remove(self.catalog_names[f])\n else:\n for extn in f:\n if os.path.exists(extn):\n log.info('Deleting intermediate catalog: %d'%extn)\n os.remove(extn)", "response": "Remove intermediate files created."} {"SOURCE": "codesearchnet", "instruction": 
"Can you generate a brief explanation for the following Python 3 code\ndef write_skycatalog(self, filename, show_flux=False, show_id=False):\n f = open(filename,'w')\n f.write(\"#Sky positions for cumulative reference catalog. Initial catalog from: \"+self.name+'\\n')\n header1 = \"#RA Dec\"\n header2 = \"#(deg) (deg)\"\n if show_flux:\n header1 += \" Flux\"\n header2 += \" (counts)\"\n if show_id:\n header1 += \" ID Origin\"\n header2 += \"\"\n header1 += \"\\n\"\n header2 += \"\\n\"\n\n f.write(header1)\n f.write(header2)\n\n show_details = show_flux or show_id\n flux_end_char = ''\n if show_details and show_flux:\n if show_id:\n flux_end_char = '\\t'\n\n for i in range(self.all_radec[0].shape[0]):\n src_line = \"{:.7f} {:.7f}\" \\\n .format(self.all_radec[0][i], self.all_radec[1][i])\n if show_details:\n #src_line += \" #\"\n if show_flux:\n #src_line += \" flux: {:.5g}{:s}\" \\\n #.format(self.xy_catalog[2][i], flux_end_char)\n src_line += \" {:.5g}\".format(self.xy_catalog[2][i])\n if show_id:\n #src_line += \" ID: {:d}\\torigin: '{:s}'\" \\\n #.format(self.xy_catalog[3][i], self.xy_catalog[-1][i])\n src_line += \" {:d} {:s}\".format(self.xy_catalog[3][i],\n self.xy_catalog[-1][i])\n f.write(src_line + '\\n')\n f.close()", "response": "Write out the all_radec catalog for this image to a file."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ntransforming the reference catalog sky positions to reference tangent plane.", "response": "def transformToRef(self):\n \"\"\" Transform reference catalog sky positions (self.all_radec)\n to reference tangent plane (self.wcs) to create output X,Y positions.\n \"\"\"\n if 'refxyunits' in self.pars and self.pars['refxyunits'] == 'pixels':\n log.info('Creating RA/Dec positions for reference sources...')\n self.outxy = np.column_stack([self.all_radec[0][:,np.newaxis],self.all_radec[1][:,np.newaxis]])\n skypos = self.wcs.wcs_pix2world(self.all_radec[0],self.all_radec[1],self.origin)\n self.all_radec[0] 
= skypos[0]\n self.all_radec[1] = skypos[1]\n else:\n log.info('Converting RA/Dec positions of reference sources from \"%s\" to '%self.name+\n 'X,Y positions in reference WCS...')\n self.refWCS = self.wcs\n outxy = self.wcs.wcs_world2pix(self.all_radec[0],self.all_radec[1],self.origin)\n # convert outxy list to a Nx2 array\n self.outxy = np.column_stack([outxy[0][:,np.newaxis],outxy[1][:,np.newaxis]])"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef clean(self):\n if not util.is_blank(self.catalog.catname) and os.path.exists(self.catalog.catname):\n os.remove(self.catalog.catname)", "response": "Remove intermediate files created by this instance of the class."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef close(self):\n if self._image is None:\n return\n\n # mcara: I think the code below is not necessary but in order to\n # preserve the same functionality as the code removed below,\n # I make an empty copy of the image object:\n empty_image = fits.HDUList()\n for u in self._image:\n empty_image.append(u.__class__(data=None, header=None))\n # mcara: END unnecessary code\n\n self._image.close() #calls fits.close()\n\n self._image = empty_image", "response": "Close the image object and release all the data arrays."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef clean(self):\n clean_files = ['blotImage','crmaskImage','finalMask',\n 'staticMask','singleDrizMask','outSky',\n 'outSContext','outSWeight','outSingle',\n 'outMedian','dqmask','tmpmask',\n 'skyMatchMask']\n\n log.info('Removing intermediate files for %s' % self._filename)\n # We need to remove the combined products first; namely, median image\n util.removeFileSafely(self.outputNames['outMedian'])\n # Now remove chip-specific intermediate files, if any were created.\n for chip in self.returnAllChips(extname='SCI'):\n for fname 
in clean_files:\n if fname in chip.outputNames:\n util.removeFileSafely(chip.outputNames[fname])", "response": "Deletes intermediate products generated for this imageObject."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns just the data array from the specified extension fileutil is used instead of fits to account for non - accurate FITS input images.", "response": "def getData(self,exten=None):\n \"\"\" Return just the data array from the specified extension\n fileutil is used instead of fits to account for non-\n FITS input images. openImage returns a fits object.\n \"\"\"\n if exten.lower().find('sci') > -1:\n # For SCI extensions, the current file will have the data\n fname = self._filename\n else:\n # otherwise, the data being requested may need to come from a\n # separate file, as is the case with WFPC2 DQ data.\n #\n # convert exten to 'sci',extver to get the DQ info for that chip\n extn = exten.split(',')\n sci_chip = self._image[self.scienceExt,int(extn[1])]\n fname = sci_chip.dqfile\n\n extnum = self._interpretExten(exten)\n if self._image[extnum].data is None:\n if os.path.exists(fname):\n _image=fileutil.openImage(fname, clobber=False, memmap=False)\n _data=fileutil.getExtn(_image, extn=exten).data\n _image.close()\n del _image\n self._image[extnum].data = _data\n else:\n _data = None\n else:\n _data = self._image[extnum].data\n\n return _data"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef getHeader(self,exten=None):\n _image=fileutil.openImage(self._filename, clobber=False, memmap=False)\n _header=fileutil.getExtn(_image,extn=exten).header\n _image.close()\n del _image\n return _header", "response": "Return just the specified header extension fileutil\n is used instead of fits."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nwrites out updated data and header to the original input file for 
this object.", "response": "def updateData(self,exten,data):\n \"\"\" Write out updated data and header to\n the original input file for this object.\n \"\"\"\n _extnum=self._interpretExten(exten)\n fimg = fileutil.openImage(self._filename, mode='update', memmap=False)\n fimg[_extnum].data = data\n fimg[_extnum].header = self._image[_extnum].header\n fimg.close()"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef getAllData(self,extname=None,exclude=None):\n\n extensions = self._findExtnames(extname=extname,exclude=exclude)\n\n for i in range(1,self._nextend+1,1):\n if hasattr(self._image[i],'_extension') and \\\n \"IMAGE\" in self._image[i]._extension:\n extver = self._image[i].header['extver']\n if (self._image[i].extname in extensions) and self._image[self.scienceExt,extver].group_member:\n self._image[i].data=self.getData(self._image[i].extname + ','+str(self._image[i].extver))", "response": "This function gets all the data from the original\n file and attaches it to the image object."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a list containing all the chips which match the given extname minus those specified for exclusion.", "response": "def returnAllChips(self,extname=None,exclude=None):\n \"\"\" Returns a list containing all the chips which match the\n extname given minus those specified for exclusion (if any).\n \"\"\"\n extensions = self._findExtnames(extname=extname,exclude=exclude)\n chiplist = []\n for i in range(1,self._nextend+1,1):\n if 'extver' in self._image[i].header:\n extver = self._image[i].header['extver']\n else:\n extver = 1\n if hasattr(self._image[i],'_extension') and \\\n \"IMAGE\" in self._image[i]._extension:\n if (self._image[i].extname in extensions) and self._image[self.scienceExt,extver].group_member:\n chiplist.append(self._image[i])\n return chiplist"} {"SOURCE": "codesearchnet", "instruction": "How would you 
explain what the following Python 3 function does\ndef _findExtnames(self, extname=None, exclude=None):\n #make a list of the available extension names for the object\n extensions=[]\n if extname is not None:\n if not isinstance(extname,list): extname=[extname]\n for extn in extname:\n extensions.append(extn.upper())\n else:\n #restore all the extensions data from the original file, be careful here\n #if you've altered data in memory you want to keep!\n for i in range(1,self._nextend+1,1):\n if hasattr(self._image[i],'_extension') and \\\n \"IMAGE\" in self._image[i]._extension:\n if self._image[i].extname.upper() not in extensions:\n extensions.append(self._image[i].extname)\n #remove this extension from the list\n if exclude is not None:\n exclude.upper()\n if exclude in extensions:\n newExt=[]\n for item in extensions:\n if item != exclude:\n newExt.append(item)\n extensions=newExt\n del newExt\n return extensions", "response": "This method finds all the available extensions for the image and returns a list of all the available extensions for the image."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef findExtNum(self, extname=None, extver=1):\n extnum = None\n extname = extname.upper()\n\n if not self._isSimpleFits:\n for ext in self._image:\n if (hasattr(ext,'_extension') and 'IMAGE' in ext._extension and\n (ext.extname == extname) and (ext.extver == extver)):\n extnum = ext.extnum\n else:\n log.info(\"Image is simple fits\")\n\n return extnum", "response": "Find the extension number of the give extname and extver."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nassigning a unique rootname for the image based in the expname.", "response": "def _assignRootname(self, chip):\n \"\"\" Assign a unique rootname for the image based in the expname. 
\"\"\"\n extname=self._image[self.scienceExt,chip].header[\"EXTNAME\"].lower()\n extver=self._image[self.scienceExt,chip].header[\"EXTVER\"]\n expname = self._rootname\n\n # record extension-based name to reflect what extension a mask file corresponds to\n self._image[self.scienceExt,chip].rootname=expname + \"_\" + extname + str(extver)\n self._image[self.scienceExt,chip].sciname=self._filename + \"[\" + extname +\",\"+str(extver)+\"]\"\n self._image[self.scienceExt,chip].dqrootname=self._rootname + \"_\" + extname + str(extver)\n # Needed to keep EXPNAMEs associated properly (1 EXPNAME for all chips)\n self._image[self.scienceExt,chip]._expname=expname\n self._image[self.scienceExt,chip]._chip =chip"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _setOutputNames(self,rootname,suffix='_drz'):\n # Define FITS output filenames for intermediate products\n\n # Build names based on final DRIZZLE output name\n # where 'output' normally would have been created\n # by 'process_input()'\n #\n outFinal = rootname+suffix+'.fits'\n outSci = rootname+suffix+'_sci.fits'\n outWeight = rootname+suffix+'_wht.fits'\n outContext = rootname+suffix+'_ctx.fits'\n outMedian = rootname+'_med.fits'\n\n # Build names based on input name\n origFilename = self._filename.replace('.fits','_OrIg.fits')\n outSky = rootname + '_sky.fits'\n outSingle = rootname+'_single_sci.fits'\n outSWeight = rootname+'_single_wht.fits'\n crCorImage = rootname+'_crclean.fits'\n\n # Build outputNames dictionary\n fnames={\n 'origFilename': origFilename,\n 'outFinal': outFinal,\n 'outMedian': outMedian,\n 'outSci': outSci,\n 'outWeight': outWeight,\n 'outContext': outContext,\n 'outSingle': outSingle,\n 'outSWeight': outSWeight,\n 'outSContext': None,\n 'outSky': outSky,\n 'crcorImage': crCorImage,\n 'ivmFile': None\n }\n\n return fnames", "response": "Define the default output names for drizzle products and their intermediate products."} 
{"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ninitialize the virtual output structures for this image.", "response": "def _initVirtualOutputs(self):\n \"\"\" Sets up the structure to hold all the output data arrays for\n this image in memory.\n \"\"\"\n self.virtualOutputs = {}\n for product in self.outputNames:\n self.virtualOutputs[product] = None"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef saveVirtualOutputs(self,outdict):\n if not self.inmemory:\n return\n for outname in outdict:\n self.virtualOutputs[outname] = outdict[outname]", "response": "Assign in - memory versions of generated products based on dictionary outdict."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns the name of the file or PyFITS object associated with that name depending on the setting of self. inmemory.", "response": "def getOutputName(self,name):\n \"\"\" Return the name of the file or PyFITS object associated with that\n name, depending on the setting of self.inmemory.\n \"\"\"\n val = self.outputNames[name]\n if self.inmemory: # if inmemory was turned on...\n # return virtualOutput object saved with that name\n val = self.virtualOutputs[val]\n return val"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef updateOutputValues(self,output_wcs):\n outputvals = self.outputValues\n\n outputvals['output'] = output_wcs.outputNames['outFinal']\n outputvals['outnx'], outputvals['outny'] = output_wcs.wcs.pixel_shape\n outputvals['texptime'] = output_wcs._exptime\n outputvals['texpstart'] = output_wcs._expstart\n outputvals['texpend'] = output_wcs._expend\n outputvals['nimages'] = output_wcs.nimages\n\n outputvals['scale'] = output_wcs.wcs.pscale #/ self._image[self.scienceExt,1].wcs.pscale\n outputvals['exptime'] = self._exptime\n\n outnames = self.outputNames\n 
outnames['outMedian'] = output_wcs.outputNames['outMedian']\n outnames['outFinal'] = output_wcs.outputNames['outFinal']\n outnames['outSci'] = output_wcs.outputNames['outSci']\n outnames['outWeight'] = output_wcs.outputNames['outWeight']\n outnames['outContext'] = output_wcs.outputNames['outContext']", "response": "Update the output values for each chip in the image."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef updateContextImage(self, contextpar):\n self.createContext = contextpar\n if not contextpar:\n log.info('No context image will be created for %s' %\n self._filename)\n self.outputNames['outContext'] = None", "response": "Update the name of the context image to None."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef find_DQ_extension(self):\n dqfile = None\n dq_suffix=None\n if(self.maskExt is not None):\n for hdu in self._image:\n # Look for DQ extension in input file\n if 'extname' in hdu.header and hdu.header['extname'].lower() == self.maskExt.lower():\n dqfile = self._filename\n dq_suffix=self.maskExt\n break\n\n return dqfile,dq_suffix", "response": "Return the suffix for the data quality extension and the name of\n the file which that DQ extension should be read from."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getKeywordList(self, kw):\n kwlist = []\n for chip in range(1,self._numchips+1,1):\n sci_chip = self._image[self.scienceExt,chip]\n if sci_chip.group_member:\n kwlist.append(sci_chip.__dict__[kw])\n\n return kwlist", "response": "Returns a list of all attribute values for all active chips in the\n ."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef getflat(self, chip):\n sci_chip = self._image[self.scienceExt, chip]\n # The keyword for ACS flat fields in the primary header of the flt\n # file is pfltfile. 
This flat file is already in the required\n # units of electrons.\n\n # The use of fileutil.osfn interprets any environment variable, such as\n # jref$, used in the specification of the reference filename\n filename = fileutil.osfn(self._image[\"PRIMARY\"].header[self.flatkey])\n hdulist = None\n try:\n hdulist = fileutil.openImage(filename, mode='readonly',\n memmap=False)\n data = hdulist[(self.scienceExt, chip)].data\n\n if data.shape[0] != sci_chip.image_shape[0]:\n ltv2 = int(np.round(sci_chip.ltv2))\n else:\n ltv2 = 0\n size2 = sci_chip.image_shape[0] + ltv2\n\n if data.shape[1] != sci_chip.image_shape[1]:\n ltv1 = int(np.round(sci_chip.ltv1))\n else:\n ltv1 = 0\n size1 = sci_chip.image_shape[1] + ltv1\n\n flat = data[ltv2:size2, ltv1:size1]\n\n except FileNotFoundError:\n flat = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)\n log.warning(\"Cannot find flat field file '{}'\".format(filename))\n log.warning(\"Treating flatfield as a constant value of '1'.\")\n\n finally:\n if hdulist is not None:\n hdulist.close()\n\n return flat", "response": "Method for retrieving a detector s flat field."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getReadNoiseImage(self, chip):\n sci_chip = self._image[self.scienceExt,chip]\n return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype) * sci_chip._rdnoise", "response": "Method for returning the read noise image of a specific chip."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nnote ===== Return an array representing the exposure time per pixel for the detector. This method will be overloaded for IR detectors which have their own EXP arrays, namely, WFC3/IR and NICMOS images. 
:units: None Returns ======= exptimeimg : numpy array The method will return an array of the same shape as the image.", "response": "def getexptimeimg(self,chip):\n \"\"\"\n Notes\n =====\n Return an array representing the exposure time per pixel for the detector.\n This method will be overloaded for IR detectors which have their own\n EXP arrays, namely, WFC3/IR and NICMOS images.\n\n :units:\n None\n\n Returns\n =======\n exptimeimg : numpy array\n The method will return an array of the same shape as the image.\n\n \"\"\"\n sci_chip = self._image[self.scienceExt,chip]\n if sci_chip._wtscl_par == 'expsq':\n wtscl = sci_chip._exptime*sci_chip._exptime\n else:\n wtscl = sci_chip._exptime\n\n return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*wtscl"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns an array representing the dark image for the detector.", "response": "def getdarkimg(self,chip):\n \"\"\"\n Notes\n =====\n Return an array representing the dark image for the detector.\n\n The method will return an array of the same shape as the image.\n\n :units: electrons\n \"\"\"\n sci_chip = self._image[self.scienceExt,chip]\n return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.darkcurrent"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the sky image for the detector.", "response": "def getskyimg(self,chip):\n \"\"\"\n Notes\n =====\n Return an array representing the sky image for the detector. 
The value\n of the sky is what would actually be subtracted from the exposure by\n the skysub step.\n\n :units: electrons\n\n \"\"\"\n sci_chip = self._image[self.scienceExt,chip]\n return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.subtractedSky"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the list of EXTVER values for extensions with name specified in extname.", "response": "def getExtensions(self, extname='SCI', section=None):\n \"\"\" Return the list of EXTVER values for extensions with name specified\n in extname.\n\n \"\"\"\n if section is None:\n numext = 0\n section = []\n for hdu in self._image:\n if 'extname' in hdu.header and hdu.header['extname'] == extname:\n section.append(hdu.header['extver'])\n else:\n if not isinstance(section,list):\n section = [section]\n\n return section"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncounts the number of extensions in the given file.", "response": "def _countEXT(self,extname=\"SCI\"):\n \"\"\" Count the number of extensions in the file with the given name\n (``EXTNAME``).\n\n \"\"\"\n count=0 #simple fits image\n\n if (self._image['PRIMARY'].header[\"EXTEND\"]):\n for i,hdu in enumerate(self._image):\n if i > 0:\n hduExtname = False\n if 'EXTNAME' in hdu.header:\n self._image[i].extnum=i\n self._image[i].extname=hdu.header[\"EXTNAME\"]\n hduExtname = True\n if 'EXTVER' in hdu.header:\n self._image[i].extver=hdu.header[\"EXTVER\"]\n else:\n self._image[i].extver = 1\n\n if ((extname is not None) and \\\n (hduExtname and (hdu.header[\"EXTNAME\"] == extname))) \\\n or extname is None:\n count=count+1\n return count"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef buildMask(self,chip,bits=0,write=False):\n dqarr = self.getData(exten=self.maskExt+','+str(chip))\n dqmask = buildmask.buildMask(dqarr,bits)\n\n if write:\n phdu = 
fits.PrimaryHDU(data=dqmask,header=self._image[self.maskExt,chip].header)\n dqmask_name = self._image[self.scienceExt,chip].dqrootname+'_dqmask.fits'\n log.info('Writing out DQ/weight mask: %s' % dqmask_name)\n if os.path.exists(dqmask_name): os.remove(dqmask_name)\n phdu.writeto(dqmask_name)\n del phdu\n self._image[self.scienceExt,chip].dqmaskname = dqmask_name\n # record the name of this mask file that was created for later\n # removal by the 'clean()' method\n self._image[self.scienceExt,chip].outputNames['dqmask'] = dqmask_name\n del dqarr\n return dqmask", "response": "Build a mask for a particular chip and return it."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef buildEXPmask(self, chip, dqarr):\n log.info(\"Applying EXPTIME weighting to DQ mask for chip %s\" %\n chip)\n #exparr = self.getexptimeimg(chip)\n exparr = self._image[self.scienceExt,chip]._exptime\n expmask = exparr*dqarr\n\n return expmask.astype(np.float32)", "response": "Builds a weight mask from an input DQ array and the exposure time\n per pixel for this chip."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef buildIVMmask(self ,chip, dqarr, scale):\n sci_chip = self._image[self.scienceExt,chip]\n ivmname = self.outputNames['ivmFile']\n\n if ivmname is not None:\n log.info(\"Applying user supplied IVM files for chip %s\" % chip)\n #Parse the input file name to get the extension we are working on\n extn = \"IVM,{}\".format(chip)\n\n #Open the mask image for updating and the IVM image\n ivm = fileutil.openImage(ivmname, mode='readonly', memmap=False)\n ivmfile = fileutil.getExtn(ivm, extn)\n\n # Multiply the IVM file by the input mask in place.\n ivmarr = ivmfile.data * dqarr\n\n ivm.close()\n\n else:\n log.info(\"Automatically creating IVM files for chip %s\" % chip)\n # If no IVM files were provided by the user we will\n # need to automatically generate them based 
upon\n # instrument specific information.\n\n flat = self.getflat(chip)\n RN = self.getReadNoiseImage(chip)\n darkimg = self.getdarkimg(chip)\n skyimg = self.getskyimg(chip)\n\n #exptime = self.getexptimeimg(chip)\n #exptime = sci_chip._exptime\n #ivm = (flat*exptime)**2/(darkimg+(skyimg*flat)+RN**2)\n ivm = (flat)**2/(darkimg+(skyimg*flat)+RN**2)\n\n # Multiply the IVM file by the input mask in place.\n ivmarr = ivm * dqarr\n\n # Update 'wt_scl' parameter to match use of IVM file\n sci_chip._wtscl = pow(sci_chip._exptime,2)/pow(scale,4)\n #sci_chip._wtscl = 1.0/pow(scale,4)\n\n return ivmarr.astype(np.float32)", "response": "Builds a weight mask from an input DQ array and an IVM array derived from the input image."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nbuild a weight mask from an input DQ array and an ERR array and an exposure time.", "response": "def buildERRmask(self,chip,dqarr,scale):\n \"\"\"\n Builds a weight mask from an input DQ array and an ERR array\n associated with the input image.\n \"\"\"\n sci_chip = self._image[self.scienceExt,chip]\n\n # Set default value in case of error, or lack of ERR array\n errmask = dqarr\n\n if self.errExt is not None:\n try:\n # Attempt to open the ERR image.\n err = self.getData(exten=self.errExt+','+str(chip))\n\n log.info(\"Applying ERR weighting to DQ mask for chip %s\" %\n chip)\n\n # Multiply the scaled ERR file by the input mask in place.\n #exptime = self.getexptimeimg(chip)\n exptime = sci_chip._exptime\n errmask = (exptime/err)**2 * dqarr\n\n # Update 'wt_scl' parameter to match use of IVM file\n #sci_chip._wtscl = pow(sci_chip._exptime,2)/pow(scale,4)\n sci_chip._wtscl = 1.0/pow(scale,4)\n\n del err\n\n except:\n # We cannot find an 'ERR' extension and the data isn't WFPC2.\n # Print a generic warning message and continue on with the\n # final drizzle step.\n\n print(textutil.textbox(\n 'WARNING: No ERR weighting will be applied to the mask '\n 'used in the 
final drizzle step! Weighting will be only '\n 'by exposure time.\\n\\nThe data provided as input does not '\n 'contain an ERR extension'), file=sys.stderr)\n print('\\n Continue with final drizzle step...', sys.stderr)\n else:\n # If we were unable to find an 'ERR' extension to apply, one\n # possible reason was that the input was a 'standard' WFPC2 data\n # file that does not actually contain an error array. Test for\n # this condition and issue a Warning to the user and continue on to\n # the final drizzle.\n\n print(textutil.textbox(\n \"WARNING: No ERR weighting will be applied to the mask used \"\n \"in the final drizzle step! Weighting will be only by \"\n \"exposure time.\\n\\nThe WFPC2 data provided as input does not \"\n \"contain ERR arrays. WFPC2 data is not supported by this \"\n \"weighting type.\\n\\nA workaround would be to create inverse \"\n \"variance maps and use 'IVM' as the final_wht_type. See the \"\n \"HELP file for more details on using inverse variance maps.\"),\n file=sys.stderr)\n print(\"\\n Continue with final drizzle step...\", file=sys.stderr)\n\n return errmask.astype(np.float32)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_mt_wcs(self, image):\n for chip in range(1,self._numchips+1,1):\n sci_chip = self._image[self.scienceExt,chip]\n ref_chip = image._image[image.scienceExt,chip]\n # Do we want to keep track of original WCS or not? 
No reason now...\n sci_chip.wcs = ref_chip.wcs.copy()", "response": "Reset the WCS for this image based on the WCS information from the WCS information from another imageObject."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef set_wtscl(self, chip, wtscl_par):\n sci_chip = self._image[self.scienceExt,chip]\n\n exptime = 1 #sci_chip._exptime\n _parval = 'unity'\n if wtscl_par is not None:\n if type(wtscl_par) == type(''):\n if not wtscl_par.isdigit():\n # String passed in as value, check for 'exptime' or 'expsq'\n _wtscl_float = None\n try:\n _wtscl_float = float(wtscl_par)\n except ValueError:\n _wtscl_float = None\n if _wtscl_float is not None:\n _wtscl = _wtscl_float\n elif wtscl_par == 'expsq':\n _wtscl = exptime*exptime\n _parval = 'expsq'\n else:\n # Default to the case of 'exptime', if\n # not explicitly specified as 'expsq'\n _wtscl = exptime\n else:\n # int value passed in as a string, convert to float\n _wtscl = float(wtscl_par)\n else:\n # We have a non-string value passed in...\n _wtscl = float(wtscl_par)\n else:\n # Default case: wt_scl = exptime\n _wtscl = exptime\n\n sci_chip._wtscl_par = _parval\n sci_chip._wtscl = _wtscl", "response": "Sets the value of the wt_scl parameter as needed for drizzling."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef getInstrParameter(self, value, header, keyword):\n if isinstance(value, str) and value in ['None', '', ' ', 'INDEF']:\n value = None\n\n if value and (keyword is not None and keyword.strip() != ''):\n exceptionMessage = \"ERROR: Your input is ambiguous! 
Please specify either a value or a keyword.\\n You specifed both \" + str(value) + \" and \" + str(keyword)\n raise ValueError(exceptionMessage)\n\n elif value is not None and value != '':\n return self._averageFromList(value)\n\n elif keyword is not None and keyword.strip() != '':\n return self._averageFromHeader(header, keyword)\n\n else:\n return None", "response": "This method gets the instrument parameter from the a\n pair of task parameters."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\naveraging out values taken from header. The keywords where to read values from are passed as a comma - separated list.", "response": "def _averageFromHeader(self, header, keyword):\n \"\"\" Averages out values taken from header. The keywords where\n to read values from are passed as a comma-separated list.\n \"\"\"\n _list = ''\n for _kw in keyword.split(','):\n if _kw in header:\n _list = _list + ',' + str(header[_kw])\n else:\n return None\n return self._averageFromList(_list)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _averageFromList(self, param):\n _result = 0.0\n _count = 0\n\n for _param in param.split(','):\n if _param != '' and float(_param) != 0.0:\n _result = _result + float(_param)\n _count += 1\n\n if _count >= 1:\n _result = _result / _count\n return _result", "response": "Averages out values passed as a comma - separated\n list disregarding the zero - valued entries."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncomputing the undistorted WCS based solely on the known distortion model information associated with the WCS.", "response": "def compute_wcslin(self,undistort=True):\n \"\"\" Compute the undistorted WCS based solely on the known distortion\n model information associated with the WCS.\n \"\"\"\n for chip in range(1,self._numchips+1,1):\n sci_chip = self._image[self.scienceExt,chip]\n chip_wcs = sci_chip.wcs.copy()\n\n if 
chip_wcs.sip is None or not undistort or chip_wcs.instrument=='DEFAULT':\n chip_wcs.sip = None\n chip_wcs.cpdis1 = None\n chip_wcs.cpdis2 = None\n chip_wcs.det2im = None\n undistort=False\n\n # compute the undistorted 'natural' plate scale for this chip\n wcslin = distortion.utils.output_wcs([chip_wcs],undistort=undistort)\n sci_chip.wcslin_pscale = wcslin.pscale"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ndefines units for this image.", "response": "def set_units(self,chip):\n \"\"\" Define units for this image.\n \"\"\"\n # Determine output value of BUNITS\n # and make sure it is not specified as 'ergs/cm...'\n sci_chip = self._image[self.scienceExt,chip]\n\n _bunit = None\n if 'BUNIT' in sci_chip.header and sci_chip.header['BUNIT'].find('ergs') < 0:\n _bunit = sci_chip.header['BUNIT']\n else:\n _bunit = 'ELECTRONS/S'\n sci_chip._bunit = _bunit\n #\n if '/s' in _bunit.lower():\n _in_units = 'cps'\n else:\n _in_units = 'counts'\n sci_chip.in_units = _in_units"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nprocess all headers to produce a set of combined headers that follows the rules defined by each instrument.", "response": "def getTemplates(fnames, blend=True):\n \"\"\" Process all headers to produce a set of combined headers\n that follows the rules defined by each instrument.\n\n \"\"\"\n if not blend:\n newhdrs = blendheaders.getSingleTemplate(fnames[0])\n newtab = None\n else:\n # apply rules to create final version of headers, plus table\n newhdrs, newtab = blendheaders.get_blended_headers(inputs=fnames)\n\n cleanTemplates(newhdrs[1],newhdrs[2],newhdrs[3])\n\n return newhdrs, newtab"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nadds WCS keywords to header hdr.", "response": "def addWCSKeywords(wcs,hdr,blot=False,single=False,after=None):\n \"\"\" Update input header 'hdr' with WCS keywords.\n \"\"\"\n wname = wcs.wcs.name\n if 
not single:\n wname = 'DRZWCS'\n\n # Update WCS Keywords based on PyDrizzle product's value\n # since 'drizzle' itself doesn't update that keyword.\n hdr['WCSNAME'] = wname\n hdr.set('VAFACTOR', value=1.0, after=after)\n hdr.set('ORIENTAT', value=wcs.orientat, after=after)\n\n # Use of 'after' not needed if these keywords already exist in the header\n if after in WCS_KEYWORDS:\n after = None\n\n if 'CTYPE1' not in hdr:\n hdr.set('CTYPE2', value=wcs.wcs.ctype[1], after=after)\n hdr.set('CTYPE1', value=wcs.wcs.ctype[0], after=after)\n hdr.set('CRPIX2', value=wcs.wcs.crpix[1], after=after)\n hdr.set('CRPIX1', value=wcs.wcs.crpix[0], after=after)\n hdr.set('CRVAL2', value=wcs.wcs.crval[1], after=after)\n hdr.set('CRVAL1', value=wcs.wcs.crval[0], after=after)\n hdr.set('CD2_2', value=wcs.wcs.cd[1][1], after=after)\n hdr.set('CD2_1', value=wcs.wcs.cd[1][0], after=after)\n hdr.set('CD1_2', value=wcs.wcs.cd[0][1], after=after)\n hdr.set('CD1_1', value=wcs.wcs.cd[0][0], after=after)\n\n # delete distortion model related keywords\n deleteDistortionKeywords(hdr)\n\n if not blot:\n blendheaders.remove_distortion_keywords(hdr)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nwrite out a simple FITS file given a numpy array and a name of another FITS file.", "response": "def writeSingleFITS(data,wcs,output,template,clobber=True,verbose=True):\n \"\"\" Write out a simple FITS file given a numpy array and the name of another\n FITS file to use as a template for the output image header.\n \"\"\"\n outname,outextn = fileutil.parseFilename(output)\n outextname,outextver = fileutil.parseExtn(outextn)\n\n if fileutil.findFile(outname):\n if clobber:\n log.info('Deleting previous output product: %s' % outname)\n fileutil.removeFile(outname)\n\n else:\n log.warning('Output file %s already exists and overwrite not '\n 'specified!' % outname)\n log.error('Quitting... 
Please remove before resuming operations.')\n raise IOError\n\n # Now update WCS keywords with values from provided WCS\n if hasattr(wcs.sip,'a_order'):\n siphdr = True\n else:\n siphdr = False\n wcshdr = wcs.wcs2header(sip2hdr=siphdr)\n\n if template is not None:\n # Get default headers from multi-extension FITS file\n # If input data is not in MEF FITS format, it will return 'None'\n # NOTE: These are HEADER objects, not HDUs\n (prihdr,scihdr,errhdr,dqhdr),newtab = getTemplates(template,EXTLIST)\n\n if scihdr is None:\n scihdr = fits.Header()\n indx = 0\n for c in prihdr.cards:\n if c.keyword not in ['INHERIT','EXPNAME']: indx += 1\n else: break\n for i in range(indx,len(prihdr)):\n scihdr.append(prihdr.cards[i])\n for i in range(indx, len(prihdr)):\n del prihdr[indx]\n else:\n scihdr = fits.Header()\n prihdr = fits.Header()\n # Start by updating PRIMARY header keywords...\n prihdr.set('EXTEND', value=True, after='NAXIS')\n prihdr['FILENAME'] = outname\n\n if outextname == '':\n outextname = 'sci'\n if outextver == 0: outextver = 1\n scihdr['EXTNAME'] = outextname.upper()\n scihdr['EXTVER'] = outextver\n\n for card in wcshdr.cards:\n scihdr[card.keyword] = (card.value, card.comment)\n\n # Create PyFITS HDUList for all extensions\n outhdu = fits.HDUList()\n # Setup primary header as an HDU ready for appending to output FITS file\n prihdu = fits.PrimaryHDU(header=prihdr)\n scihdu = fits.ImageHDU(header=scihdr,data=data)\n\n outhdu.append(prihdu)\n outhdu.append(scihdu)\n outhdu.writeto(outname)\n\n if verbose:\n print('Created output image: %s' % outname)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nwriting basic drizzle - related keywords out to the header.", "response": "def writeDrizKeywords(hdr,imgnum,drizdict):\n \"\"\" Write basic drizzle-related keywords out to image header as a record\n of the processing performed to create the image\n\n The dictionary 'drizdict' will contain the keywords and values to be\n written out to 
the header.\n \"\"\"\n _keyprefix = 'D%03d'%imgnum\n\n for key in drizdict:\n val = drizdict[key]['value']\n if val is None: val = \"\"\n comment = drizdict[key]['comment']\n if comment is None: comment = \"\"\n hdr[_keyprefix+key] = (val, drizdict[key]['comment'])"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef writeFITS(self, template, sciarr, whtarr, ctxarr=None,\n versions=None, overwrite=yes, blend=True, virtual=False):\n \"\"\"\n Generate PyFITS objects for each output extension\n using the file given by 'template' for populating\n headers.\n\n The arrays will have the size specified by 'shape'.\n \"\"\"\n if not isinstance(template, list):\n template = [template]\n\n if fileutil.findFile(self.output):\n if overwrite:\n log.info('Deleting previous output product: %s' % self.output)\n fileutil.removeFile(self.output)\n\n else:\n log.warning('Output file %s already exists and overwrite not '\n 'specified!' % self.output)\n log.error('Quitting... Please remove before resuming '\n 'operations.')\n raise IOError\n\n # initialize output value for this method\n outputFITS = {}\n # Default value for NEXTEND when 'build'== True\n nextend = 3\n if not self.build:\n nextend = 0\n if self.outweight:\n if overwrite:\n if fileutil.findFile(self.outweight):\n log.info('Deleting previous output WHT product: %s' %\n self.outweight)\n fileutil.removeFile(self.outweight)\n else:\n log.warning('Output file %s already exists and overwrite '\n 'not specified!' % self.outweight)\n log.error('Quitting... Please remove before resuming '\n 'operations.')\n raise IOError\n\n\n if self.outcontext:\n if overwrite:\n if fileutil.findFile(self.outcontext):\n log.info('Deleting previous output CTX product: %s' %\n self.outcontext)\n fileutil.removeFile(self.outcontext)\n else:\n log.warning('Output file %s already exists and overwrite '\n 'not specified!' % self.outcontext)\n log.error('Quitting... 
Please remove before resuming '\n 'operations.')\n raise IOError\n\n\n # Get default headers from multi-extension FITS file\n # If only writing out single drizzle product, blending needs to be\n # forced off as there is only 1 input to report, no blending needed\n if self.single:\n blend=False\n\n # If input data is not in MEF FITS format, it will return 'None'\n # and those headers will have to be generated from drizzle output\n # file FITS headers.\n # NOTE: These are HEADER objects, not HDUs\n #prihdr,scihdr,errhdr,dqhdr = getTemplates(template)\n self.fullhdrs, intab = getTemplates(template, blend=False)\n\n newhdrs, newtab = getTemplates(template,blend=blend)\n if newtab is not None: nextend += 1 # account for new table extn\n\n prihdr = newhdrs[0]\n scihdr = newhdrs[1]\n errhdr = newhdrs[2]\n dqhdr = newhdrs[3]\n\n # Setup primary header as an HDU ready for appending to output FITS file\n prihdu = fits.PrimaryHDU(header=prihdr, data=None)\n\n # Start by updating PRIMARY header keywords...\n prihdu.header.set('EXTEND', value=True, after='NAXIS')\n prihdu.header['NEXTEND'] = nextend\n prihdu.header['FILENAME'] = self.output\n prihdu.header['PROD_VER'] = 'DrizzlePac {}'.format(version.__version__)\n\n # Update the ROOTNAME with the new value as well\n _indx = self.output.find('_drz')\n if _indx < 0:\n rootname_val = self.output\n else:\n rootname_val = self.output[:_indx]\n prihdu.header['ROOTNAME'] = rootname_val\n\n\n # Get the total exposure time for the image\n # If not calculated by PyDrizzle and passed through\n # the pardict, then leave value from the template image.\n if self.texptime:\n prihdu.header['EXPTIME'] = self.texptime\n prihdu.header.set('TEXPTIME', value=self.texptime, after='EXPTIME')\n prihdu.header['EXPSTART'] = self.expstart\n prihdu.header['EXPEND'] = self.expend\n\n #Update ASN_MTYPE to reflect the fact that this is a product\n # Currently hard-wired to always output 'PROD-DTH' as MTYPE\n prihdu.header['ASN_MTYP'] = 'PROD-DTH'\n\n # 
Update DITHCORR calibration keyword if present\n # Remove when we can modify FITS headers in place...\n if 'DRIZCORR' in prihdu.header:\n prihdu.header['DRIZCORR'] = 'COMPLETE'\n if 'DITHCORR' in prihdu.header:\n prihdu.header['DITHCORR'] = 'COMPLETE'\n\n prihdu.header['NDRIZIM'] =(len(self.parlist),\n 'Drizzle, No. images drizzled onto output')\n\n # Only a subset of these keywords makes sense for the new WCS based\n # transformations. They need to be reviewed to decide what to keep\n # and what to leave out.\n if not self.blot:\n self.addDrizKeywords(prihdu.header,versions)\n\n if scihdr:\n try:\n del scihdr['OBJECT']\n except KeyError:\n pass\n\n if 'CCDCHIP' in scihdr: scihdr['CCDCHIP'] = '-999'\n if 'NCOMBINE' in scihdr:\n scihdr['NCOMBINE'] = self.parlist[0]['nimages']\n\n # If BUNIT keyword was found and reset, then\n bunit_last_kw = self.find_kwupdate_location(scihdr,'bunit')\n if self.bunit is not None:\n comment_str = \"Units of science product\"\n if self.bunit.lower()[:5] == 'count':\n comment_str = \"counts * gain = electrons\"\n scihdr.set('BUNIT', value=self.bunit,\n comment=comment_str,\n after=bunit_last_kw)\n else:\n # check to see whether to update already present BUNIT comment\n if 'bunit' in scihdr and scihdr['bunit'].lower()[:5] == 'count':\n comment_str = \"counts * gain = electrons\"\n scihdr.set('BUNIT', value=scihdr['bunit'],\n comment=comment_str,\n after=bunit_last_kw)\n\n # Add WCS keywords to SCI header\n if self.wcs:\n pre_wcs_kw = self.find_kwupdate_location(scihdr,'CD1_1')\n addWCSKeywords(self.wcs,scihdr,blot=self.blot,\n single=self.single, after=pre_wcs_kw)\n # Recompute this after removing distortion kws\n pre_wcs_kw = self.find_kwupdate_location(scihdr,'CD1_1')\n\n ##########\n # Now, build the output file\n ##########\n if self.build:\n print('-Generating multi-extension output file: ',self.output)\n fo = fits.HDUList()\n\n # Add primary header to output file...\n fo.append(prihdu)\n\n if self.single and self.compress:\n hdu = 
fits.CompImageHDU(data=sciarr, header=scihdr, name=EXTLIST[0])\n else:\n hdu = fits.ImageHDU(data=sciarr, header=scihdr, name=EXTLIST[0])\n last_kw = self.find_kwupdate_location(scihdr,'EXTNAME')\n hdu.header.set('EXTNAME', value='SCI', after=last_kw)\n hdu.header.set('EXTVER', value=1, after='EXTNAME')\n fo.append(hdu)\n\n # Build WHT extension here, if requested...\n if errhdr:\n errhdr['CCDCHIP'] = '-999'\n\n if self.single and self.compress:\n hdu = fits.CompImageHDU(data=whtarr, header=errhdr, name=EXTLIST[1])\n else:\n hdu = fits.ImageHDU(data=whtarr, header=errhdr, name=EXTLIST[1])\n last_kw = self.find_kwupdate_location(errhdr,'EXTNAME')\n hdu.header.set('EXTNAME', value='WHT', after=last_kw)\n hdu.header.set('EXTVER', value=1, after='EXTNAME')\n if self.wcs:\n pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')\n # Update WCS Keywords based on PyDrizzle product's value\n # since 'drizzle' itself doesn't update that keyword.\n addWCSKeywords(self.wcs,hdu.header,blot=self.blot,\n single=self.single, after=pre_wcs_kw)\n fo.append(hdu)\n\n # Build CTX extension here\n # If there is only 1 plane, write it out as a 2-D extension\n if self.outcontext:\n if ctxarr.shape[0] == 1:\n _ctxarr = ctxarr[0]\n else:\n _ctxarr = ctxarr\n else:\n _ctxarr = None\n\n if self.single and self.compress:\n hdu = fits.CompImageHDU(data=_ctxarr, header=dqhdr, name=EXTLIST[2])\n else:\n hdu = fits.ImageHDU(data=_ctxarr, header=dqhdr, name=EXTLIST[2])\n last_kw = self.find_kwupdate_location(dqhdr,'EXTNAME')\n hdu.header.set('EXTNAME', value='CTX', after=last_kw)\n hdu.header.set('EXTVER', value=1, after='EXTNAME')\n\n if self.wcs:\n pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')\n # Update WCS Keywords based on PyDrizzle product's value\n # since 'drizzle' itself doesn't update that keyword.\n addWCSKeywords(self.wcs,hdu.header,blot=self.blot,\n single=self.single, after=pre_wcs_kw)\n fo.append(hdu)\n\n # remove all alternate WCS solutions from headers of this 
product\n wcs_functions.removeAllAltWCS(fo,[1])\n\n # add table of combined header keyword values to FITS file\n if newtab is not None:\n fo.append(newtab)\n\n if not virtual:\n print('Writing out to disk:',self.output)\n # write out file to disk\n fo.writeto(self.output)\n fo.close()\n del fo, hdu\n fo = None\n # End 'if not virtual'\n outputFITS[self.output]= fo\n\n else:\n print('-Generating simple FITS output: %s' % self.outdata)\n\n fo = fits.HDUList()\n hdu_header = prihdu.header.copy()\n del hdu_header['nextend']\n\n # Append remaining unique header keywords from template DQ\n # header to Primary header...\n if scihdr:\n for _card in scihdr.cards:\n if _card.keyword not in RESERVED_KEYS and _card.keyword not in hdu_header:\n hdu_header.append(_card)\n for kw in ['PCOUNT', 'GCOUNT']:\n try:\n del kw\n except KeyError:\n pass\n hdu_header['filename'] = self.outdata\n\n if self.compress:\n hdu = fits.CompImageHDU(data=sciarr, header=hdu_header)\n wcs_ext = [1]\n else:\n hdu = fits.ImageHDU(data=sciarr, header=hdu_header)\n wcs_ext = [0]\n\n # explicitly set EXTEND to FALSE for simple FITS files.\n dim = len(sciarr.shape)\n hdu.header.set('extend', value=False, after='NAXIS%s'%dim)\n\n # Add primary header to output file...\n fo.append(hdu)\n\n # remove all alternate WCS solutions from headers of this product\n logutil.logging.disable(logutil.logging.INFO)\n wcs_functions.removeAllAltWCS(fo,wcs_ext)\n logutil.logging.disable(logutil.logging.NOTSET)\n\n # add table of combined header keyword values to FITS file\n if newtab is not None:\n fo.append(newtab)\n\n if not virtual:\n print('Writing out image to disk:',self.outdata)\n # write out file to disk\n fo.writeto(self.outdata)\n del fo,hdu\n fo = None\n # End 'if not virtual'\n outputFITS[self.outdata]= fo\n\n if self.outweight and whtarr is not None:\n # We need to build new PyFITS objects for each WHT array\n fwht = fits.HDUList()\n\n if errhdr:\n errhdr['CCDCHIP'] = '-999'\n\n if self.compress:\n hdu = 
fits.CompImageHDU(data=whtarr, header=prihdu.header)\n else:\n hdu = fits.ImageHDU(data=whtarr, header=prihdu.header)\n # Append remaining unique header keywords from template DQ\n # header to Primary header...\n if errhdr:\n for _card in errhdr.cards:\n if _card.keyword not in RESERVED_KEYS and _card.keyword not in hdu.header:\n hdu.header.append(_card)\n hdu.header['filename'] = self.outweight\n hdu.header['CCDCHIP'] = '-999'\n if self.wcs:\n pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')\n # Update WCS Keywords based on PyDrizzle product's value\n # since 'drizzle' itself doesn't update that keyword.\n addWCSKeywords(self.wcs,hdu.header, blot=self.blot,\n single=self.single, after=pre_wcs_kw)\n\n # Add primary header to output file...\n fwht.append(hdu)\n # remove all alternate WCS solutions from headers of this product\n wcs_functions.removeAllAltWCS(fwht,wcs_ext)\n\n if not virtual:\n print('Writing out image to disk:',self.outweight)\n fwht.writeto(self.outweight)\n del fwht,hdu\n fwht = None\n # End 'if not virtual'\n outputFITS[self.outweight]= fwht\n\n # If a context image was specified, build a PyFITS object\n # for it as well...\n if self.outcontext and ctxarr is not None:\n fctx = fits.HDUList()\n\n # If there is only 1 plane, write it out as a 2-D extension\n if ctxarr.shape[0] == 1:\n _ctxarr = ctxarr[0]\n else:\n _ctxarr = ctxarr\n\n if self.compress:\n hdu = fits.CompImageHDU(data=_ctxarr, header=prihdu.header)\n else:\n hdu = fits.ImageHDU(data=_ctxarr, header=prihdu.header)\n # Append remaining unique header keywords from template DQ\n # header to Primary header...\n if dqhdr:\n for _card in dqhdr.cards:\n if ( (_card.keyword not in RESERVED_KEYS) and\n _card.keyword not in hdu.header):\n hdu.header.append(_card)\n hdu.header['filename'] = self.outcontext\n if self.wcs:\n pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')\n # Update WCS Keywords based on PyDrizzle product's value\n # since 'drizzle' itself doesn't update 
that keyword.\n addWCSKeywords(self.wcs,hdu.header, blot=self.blot,\n single=self.single, after=pre_wcs_kw)\n\n fctx.append(hdu)\n # remove all alternate WCS solutions from headers of this product\n wcs_functions.removeAllAltWCS(fctx,wcs_ext)\n if not virtual:\n print('Writing out image to disk:',self.outcontext)\n fctx.writeto(self.outcontext)\n del fctx,hdu\n fctx = None\n # End 'if not virtual'\n\n outputFITS[self.outcontext]= fctx\n\n return outputFITS", "response": "Write the FITS file for each output extension."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nfinds the last keyword in the output header that comes before the new keyword in the original full input headers.", "response": "def find_kwupdate_location(self,hdr,keyword):\n \"\"\"\n Find the last keyword in the output header that comes before the new\n keyword in the original, full input headers.\n This will rely on the original ordering of keywords from the original input\n files in order to place the updated keyword in the correct location in case\n the keyword was removed from the output header prior to calling this method.\n \"\"\"\n # start by looping through the full templates\n kw_list = None\n last_kw = None\n for extn in self.fullhdrs:\n if keyword in extn:\n #indx = extn.ascard.index_of(keyword)\n indx = extn.index(keyword)\n kw_list = list(extn.keys())[:indx]\n break\n if kw_list:\n # find which keyword from this list exists in header to be updated\n for kw in kw_list[::-1]:\n if kw in hdr:\n last_kw = kw\n break\n # determine new value for the last keyword found before the HISTORY kws\n if last_kw is None:\n hdrkeys = list(hdr.keys())\n i = -1\n last_kw = hdrkeys[i]\n while last_kw == 'HISTORY':\n i -= 1\n last_kw = hdrkeys[i]\n\n return last_kw"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef addDrizKeywords(self,hdr,versions):\n\n # Extract some global information for the keywords\n _geom = 
'User parameters'\n\n _imgnum = 0\n for pl in self.parlist:\n\n # Start by building up the keyword prefix based\n # on the image number for the chip\n #_keyprefix = 'D%03d'%_imgnum\n _imgnum += 1\n\n drizdict = DRIZ_KEYWORDS.copy()\n # Update drizdict with current values\n drizdict['VER']['value'] = pl['driz_version'][:44]\n drizdict['DATA']['value'] = pl['data'][:64]\n drizdict['DEXP']['value'] = pl['exptime']\n drizdict['OUDA']['value'] = pl['outFinal'][:64]\n drizdict['OUWE']['value'] = pl['outWeight'][:64]\n if pl['outContext'] is None:\n outcontext = \"\"\n else:\n outcontext = pl['outContext'][:64]\n drizdict['OUCO']['value'] = outcontext\n if self.single:\n drizdict['MASK']['value'] = pl['singleDrizMask'][:64]\n else:\n drizdict['MASK']['value'] = pl['finalMask'][:64]\n\n # Process the values of WT_SCL to be consistent with\n # what IRAF Drizzle would output\n if 'wt_scl_val' in pl:\n _wtscl = pl['wt_scl_val']\n else:\n if pl['wt_scl'] == 'exptime': _wtscl = pl['exptime']\n elif pl['wt_scl'] == 'expsq': _wtscl = pl['exptime']*pl['exptime']\n else: _wtscl = pl['wt_scl']\n\n drizdict['WTSC']['value'] = _wtscl\n drizdict['KERN']['value'] = pl['kernel']\n drizdict['PIXF']['value'] = pl['pixfrac']\n drizdict['OUUN']['value'] = self.units\n if pl['fillval'] is None:\n _fillval = 'INDEF'\n else:\n _fillval = pl['fillval']\n drizdict['FVAL']['value'] = _fillval\n drizdict['WKEY']['value'] = pl['driz_wcskey']\n\n drizdict['SCAL'] = {'value':pl['scale'],'comment':'Drizzle, pixel size (arcsec) of output image'}\n drizdict['ISCL'] = {'value':pl['idcscale'],'comment':'Drizzle, default IDCTAB pixel size(arcsec)'}\n\n # Now update header with values\n writeDrizKeywords(hdr,_imgnum,drizdict)\n del drizdict\n\n # Add version information as HISTORY cards to the header\n if versions is not None:\n ver_str = \"AstroDrizzle processing performed using: \"\n hdr.add_history(ver_str)\n for k in versions.keys():\n ver_str = ' '+str(k)+' Version '+str(versions[k])\n 
hdr.add_history(ver_str)", "response": "Add the drizzle parameter keywords to the header."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nperforms an iterative - fit with nclip iterations", "response": "def iter_fit_shifts(xy,uv,nclip=3,sigma=3.0):\n \"\"\" Perform an iterative-fit with 'nclip' iterations\n \"\"\"\n fit = fit_shifts(xy,uv)\n if nclip is None: nclip = 0\n # define index to initially include all points\n for n in range(nclip):\n resids = compute_resids(xy,uv,fit)\n resids1d = np.sqrt(np.power(resids[:,0],2)+np.power(resids[:,1],2))\n sig = resids1d.std()\n # redefine what pixels will be included in next iteration\n goodpix = resids1d < sigma*sig\n xy = xy[goodpix]\n uv = uv[goodpix]\n fit = fit_shifts(xy,uv)\n\n fit['img_coords'] = xy\n fit['ref_coords'] = uv\n\n return fit"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nperforms an rscale fit between matched lists of pixel positions xy and uv.", "response": "def fit_all(xy,uv,mode='rscale',center=None,verbose=True):\n \"\"\" Performs an 'rscale' fit between matched lists of pixel positions xy and uv\"\"\"\n if mode not in ['general', 'shift', 'rscale']:\n mode = 'rscale'\n if not isinstance(xy,np.ndarray):\n # cast input list as numpy ndarray for fitting\n xy = np.array(xy)\n if not isinstance(uv,np.ndarray):\n # cast input list as numpy ndarray for fitting\n uv = np.array(uv)\n\n if mode == 'shift':\n logstr = 'Performing \"shift\" fit'\n if verbose:\n print(logstr)\n else:\n log.info(logstr)\n result = fit_shifts(xy, uv)\n\n elif mode == 'general':\n logstr = 'Performing \"general\" fit'\n if verbose:\n print(logstr)\n else:\n log.info(logstr)\n result = fit_general(xy, uv)\n\n else:\n logstr = 'Performing \"rscale\" fit'\n if verbose:\n print(logstr)\n else:\n log.info(logstr)\n result = geomap_rscale(xy, uv, center=center)\n\n return result"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what 
the following Python 3 function does\ndef fit_shifts(xy, uv):\n diff_pts = xy - uv\n Pcoeffs = np.array([1.0,0.0,diff_pts[:,0].mean(dtype=np.float64)])\n Qcoeffs = np.array([0.0,1.0,diff_pts[:,1].mean(dtype=np.float64)])\n\n fit = build_fit(Pcoeffs, Qcoeffs, 'shift')\n resids = diff_pts - fit['offset']\n fit['resids'] = resids\n fit['rms'] = resids.std(axis=0)\n fit['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))\n fit['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))\n\n return fit", "response": "This function calculates the fit for the shift only between two lists of positions xy and uv."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef fit_general(xy, uv):\n # Set up products used for computing the fit\n gxy = uv.astype(ndfloat128)\n guv = xy.astype(ndfloat128)\n Sx = gxy[:,0].sum()\n Sy = gxy[:,1].sum()\n Su = guv[:,0].sum()\n Sv = guv[:,1].sum()\n\n Sux = np.dot(guv[:,0], gxy[:,0])\n Svx = np.dot(guv[:,1], gxy[:,0])\n Suy = np.dot(guv[:,0], gxy[:,1])\n Svy = np.dot(guv[:,1], gxy[:,1])\n Sxx = np.dot(gxy[:,0], gxy[:,0])\n Syy = np.dot(gxy[:,1], gxy[:,1])\n Sxy = np.dot(gxy[:,0], gxy[:,1])\n\n n = len(xy[:,0])\n M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])\n U = np.array([Su, Sux, Suy])\n V = np.array([Sv, Svx, Svy])\n\n # The fit solutioN...\n # where\n # u = P0 + P1*x + P2*y\n # v = Q0 + Q1*x + Q2*y\n #\n try:\n invM = np.linalg.inv(M.astype(np.float64))\n except np.linalg.LinAlgError:\n raise SingularMatrixError(\n \"Singular matrix: suspected colinear points.\"\n )\n P = np.dot(invM, U).astype(np.float64)\n Q = np.dot(invM, V).astype(np.float64)\n if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):\n raise ArithmeticError('Singular matrix.')\n\n # Return the shift, rotation, and scale changes\n result = build_fit(P, Q, 'general')\n resids = xy - np.dot(uv, result['fit_matrix']) - result['offset']\n result['rms'] = resids.std(axis=0)\n result['resids'] = resids\n result['rmse'] = 
float(np.sqrt(np.mean(2 * resids**2)))\n result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))\n\n return result", "response": "This function computes a simple fit for the shift only between two lists of positions xy and uv."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nperform a generalized fit between matched lists of positions given by the 2 column arrays xy and uv. This function fits for translation, rotation, and scale changes between 'xy' and 'uv', allowing for different scales and orientations for X and Y axes. ================================= DEVELOPMENT NOTE: Checks need to be put in place to verify that enough objects are available for a fit. ================================= Output: (Xo,Yo),Rot,(Scale,Sx,Sy) where Xo,Yo: offset, Rot: rotation, Scale: average scale change, and Sx,Sy: scale changes in X and Y separately. Algorithm and nomenclature provided by: Colin Cox (11 Nov 2004)", "response": "def fit_arrays(uv, xy):\n \"\"\" Performs a generalized fit between matched lists of positions\n given by the 2 column arrays xy and uv.\n\n This function fits for translation, rotation, and scale changes\n between 'xy' and 'uv', allowing for different scales and\n orientations for X and Y axes.\n\n =================================\n DEVELOPMENT NOTE:\n Checks need to be put in place to verify that\n enough objects are available for a fit.\n =================================\n\n Output:\n (Xo,Yo),Rot,(Scale,Sx,Sy)\n where\n Xo,Yo: offset,\n Rot: rotation,\n Scale: average scale change, and\n Sx,Sy: scale changes in X and Y separately.\n\n Algorithm and nomenclature provided by: Colin Cox (11 Nov 2004)\n \"\"\"\n\n if not isinstance(xy,np.ndarray):\n # cast input list as numpy ndarray for fitting\n xy = np.array(xy)\n if not isinstance(uv,np.ndarray):\n # cast input list as numpy ndarray for fitting\n uv = np.array(uv)\n\n # Set up products used for computing the fit\n Sx = xy[:,0].sum()\n Sy = 
xy[:,1].sum()\n Su = uv[:,0].sum()\n Sv = uv[:,1].sum()\n\n Sux = np.dot(uv[:,0], xy[:,0])\n Svx = np.dot(uv[:,1], xy[:,0])\n Suy = np.dot(uv[:,0], xy[:,1])\n Svy = np.dot(uv[:,1], xy[:,1])\n Sxx = np.dot(xy[:,0], xy[:,0])\n Syy = np.dot(xy[:,1], xy[:,1])\n Sxy = np.dot(xy[:,0], xy[:,1])\n\n n = len(xy[:,0])\n M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])\n U = np.array([Su, Sux, Suy])\n V = np.array([Sv, Svx, Svy])\n\n # The fit solutioN...\n # where\n # u = P0 + P1*x + P2*y\n # v = Q0 + Q1*x + Q2*y\n #\n try:\n invM = np.linalg.inv(M.astype(np.float64))\n except np.linalg.LinAlgError:\n raise SingularMatrixError(\n \"Singular matrix: suspected colinear points.\"\n )\n P = np.dot(invM, U).astype(np.float64)\n Q = np.dot(invM, V).astype(np.float64)\n if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):\n raise ArithmeticError('Singular matrix.')\n\n # Return the shift, rotation, and scale changes\n return build_fit(P, Q, 'general')"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef apply_old_coeffs(xy,coeffs):\n _theta = np.deg2rad(coeffs[1])\n _mrot = np.zeros(shape=(2,2),dtype=np.float64)\n _mrot[0] = (np.cos(_theta),np.sin(_theta))\n _mrot[1] = (-np.sin(_theta),np.cos(_theta))\n\n new_pos = (np.dot(xy,_mrot)/coeffs[2][0]) + coeffs[0]\n\n return new_pos", "response": "Apply the offset shift and rotation values from a linear fit\n to an array of x y positions."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\napplies the coefficients from a linear fit to an array of x y positions.", "response": "def apply_fit(xy,coeffs):\n \"\"\" Apply the coefficients from a linear fit to\n an array of x,y positions.\n\n The coeffs come from the 'coeffs' member of the\n 'fit_arrays()' output.\n \"\"\"\n x_new = coeffs[0][2] + coeffs[0][0]*xy[:,0] + coeffs[0][1]*xy[:,1]\n y_new = coeffs[1][2] + coeffs[1][0]*xy[:,0] + coeffs[1][1]*xy[:,1]\n\n return 
x_new,y_new"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncompute the residuals based on fit and input arrays to the fit", "response": "def compute_resids(xy,uv,fit):\n \"\"\" Compute the residuals based on fit and input arrays to the fit\n \"\"\"\n print('FIT coeffs: ',fit['coeffs'])\n xn,yn = apply_fit(uv,fit['coeffs'])\n resids = xy - np.transpose([xn,yn])\n return resids"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef geomap_rscale(xyin,xyref,center=None):\n if center is not None:\n xcen = center[0]\n ycen = center[1]\n else:\n xcen = xyref[:,0].mean()\n ycen = xyref[:,1].mean()\n\n dx = xyref[:,0].astype(ndfloat128)\n dy = xyref[:,1].astype(ndfloat128)\n du = xyin[:,0].astype(ndfloat128)\n dv = xyin[:,1].astype(ndfloat128)\n\n n = xyref.shape[0]\n Sx = dx.sum()\n Sy = dy.sum()\n Su = du.sum()\n Sv = dv.sum()\n xr0 = Sx/n\n yr0 = Sy/n\n xi0 = Su/n\n yi0 = Sv/n\n Sxrxr = np.power((dx-xr0),2).sum()\n Syryr = np.power((dy-yr0),2).sum()\n Syrxi = ((dy-yr0)*(du-xi0)).sum()\n Sxryi = ((dx-xr0)*(dv-yi0)).sum()\n Sxrxi = ((dx-xr0)*(du-xi0)).sum()\n Syryi = ((dy-yr0)*(dv-yi0)).sum()\n\n rot_num = Sxrxi * Syryi\n rot_denom = Syrxi * Sxryi\n if rot_num == rot_denom: det = 0.0\n else: det = rot_num - rot_denom\n if (det < 0):\n rot_num = Syrxi + Sxryi\n rot_denom = Sxrxi - Syryi\n else:\n rot_num = Syrxi - Sxryi\n rot_denom = Sxrxi + Syryi\n if rot_num == rot_denom: theta = 0.0\n else:\n theta = np.rad2deg(np.arctan2(rot_num,rot_denom))\n if theta < 0:\n theta += 360.0\n\n ctheta = np.cos(np.deg2rad(theta))\n stheta = np.sin(np.deg2rad(theta))\n s_num = rot_denom*ctheta + rot_num*stheta\n s_denom = Sxrxr + Syryr\n if s_denom < 0:\n mag = 1.0\n else:\n mag = s_num/s_denom\n\n if det < 0:\n # \"flip\" y-axis (reflection about x-axis *after* rotation)\n # NOTE: keep in mind that 'fit_matrix'\n # is the transposed rotation matrix.\n sthetax = -mag*stheta\n cthetay = -mag*ctheta\n 
else:\n sthetax = mag*stheta\n cthetay = mag*ctheta\n cthetax = mag*ctheta\n sthetay = mag*stheta\n\n sdet = np.sign(det)\n xshift = (xi0 - (xr0*cthetax + sdet*yr0*sthetax)).astype(np.float64)\n yshift = (yi0 - (-sdet*xr0*sthetay + yr0*cthetay)).astype(np.float64)\n\n P = np.array([ cthetax, sthetay, xshift],dtype=np.float64)\n Q = np.array([ -sthetax, cthetay, yshift],dtype=np.float64)\n\n # Return the shift, rotation, and scale changes\n result = build_fit(P, Q, fitgeom='rscale')\n resids = xyin - np.dot((xyref), result['fit_matrix']) - result['offset']\n result['rms'] = resids.std(axis=0)\n result['resids'] = resids\n result['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))\n result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))\n\n return result", "response": "This function calculates the fit of a single object based on the geomap function."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef AstroDrizzle(input=None, mdriztab=False, editpars=False, configobj=None,\n wcsmap=None, **input_dict):\n \"\"\" AstroDrizzle command-line interface \"\"\"\n # Support input of filenames from command-line without a parameter name\n # then copy this into input_dict for merging with TEAL ConfigObj\n # parameters.\n\n # Load any user-specified configobj\n if isinstance(configobj, (str, bytes)):\n if configobj == 'defaults':\n # load \"TEAL\"-defaults (from ~/.teal/):\n configobj = teal.load(__taskname__)\n else:\n if not os.path.exists(configobj):\n raise RuntimeError('Cannot find .cfg file: '+configobj)\n configobj = teal.load(configobj, strict=False)\n elif configobj is None:\n # load 'astrodrizzle' parameter defaults as described in the docs:\n configobj = teal.load(__taskname__, defaults=True)\n\n if input and not util.is_blank(input):\n input_dict['input'] = input\n elif configobj is None:\n raise TypeError(\"AstroDrizzle() needs either 'input' or \"\n \"'configobj' arguments\")\n\n if 'updatewcs' in 
input_dict: # user trying to explicitly turn on updatewcs\n configobj['updatewcs'] = input_dict['updatewcs']\n del input_dict['updatewcs']\n\n # If called from interactive user-interface, configObj will not be\n # defined yet, so get defaults using EPAR/TEAL.\n #\n # Also insure that the input_dict (user-specified values) are folded in\n # with a fully populated configObj instance.\n try:\n configObj = util.getDefaultConfigObj(__taskname__, configobj,\n input_dict,\n loadOnly=(not editpars))\n log.debug('')\n log.debug(\"INPUT_DICT:\")\n util.print_cfg(input_dict, log.debug)\n log.debug('')\n # If user specifies optional parameter for final_wcs specification in input_dict,\n # insure that the final_wcs step gets turned on\n util.applyUserPars_steps(configObj, input_dict, step='3a')\n util.applyUserPars_steps(configObj, input_dict, step='7a')\n\n except ValueError:\n print(\"Problem with input parameters. Quitting...\", file=sys.stderr)\n return\n\n if not configObj:\n return\n\n configObj['mdriztab'] = mdriztab\n # If 'editpars' was set to True, util.getDefaultConfigObj() will have\n # already called 'run()'.\n if not editpars:\n run(configObj, wcsmap=wcsmap)", "response": "AstroDrizzle command - line interface."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ninitials example by Nadia ran MD with configobj EPAR using: It can be run in one of two ways: from stsci.tools import teal 1. Passing a config object to teal teal.teal('drizzlepac/pars/astrodrizzle.cfg') 2. Passing a task name: teal.teal('astrodrizzle') The example config files are in drizzlepac/pars", "response": "def run(configobj, wcsmap=None):\n \"\"\"\n Initial example by Nadia ran MD with configobj EPAR using:\n It can be run in one of two ways:\n\n from stsci.tools import teal\n\n 1. Passing a config object to teal\n\n teal.teal('drizzlepac/pars/astrodrizzle.cfg')\n\n\n 2. 
Passing a task name:\n\n teal.teal('astrodrizzle')\n\n The example config files are in drizzlepac/pars\n\n \"\"\"\n # turn on logging, redirecting stdout/stderr messages to a log file\n # while also printing them out to stdout as well\n # also, initialize timing of processing steps\n #\n # We need to define a default logfile name from the user's parameters\n input_list, output, ivmlist, odict = \\\n processInput.processFilenames(configobj['input'])\n\n if output is not None:\n def_logname = output\n elif len(input_list) > 0:\n def_logname = input_list[0]\n else:\n print(textutil.textbox(\n \"ERROR:\\nNo valid input files found! Please restart the task \"\n \"and check the value for the 'input' parameter.\"), file=sys.stderr)\n def_logname = None\n return\n\n clean = configobj['STATE OF INPUT FILES']['clean']\n procSteps = util.ProcSteps()\n\n print(\"AstroDrizzle Version {:s} ({:s}) started at: {:s}\\n\"\n .format(__version__, __version_date__, util._ptime()[0]))\n util.print_pkg_versions(log=log)\n\n log.debug('')\n log.debug(\n \"==== AstroDrizzle was invoked with the following parameters: ====\"\n )\n log.debug('')\n util.print_cfg(configobj, log.debug)\n\n try:\n # Define list of imageObject instances and output WCSObject instance\n # based on input paramters\n imgObjList = None\n procSteps.addStep('Initialization')\n imgObjList, outwcs = processInput.setCommonInput(configobj)\n procSteps.endStep('Initialization')\n\n if imgObjList is None or not imgObjList:\n errmsg = \"No valid images found for processing!\\n\"\n errmsg += \"Check log file for full details.\\n\"\n errmsg += \"Exiting AstroDrizzle now...\"\n print(textutil.textbox(errmsg, width=65))\n print(textutil.textbox(\n 'ERROR:\\nAstroDrizzle Version {:s} encountered a problem! 
'\n 'Processing terminated at {:s}.'\n .format(__version__, util._ptime()[0])), file=sys.stderr)\n return\n\n log.info(\"USER INPUT PARAMETERS common to all Processing Steps:\")\n util.printParams(configobj, log=log)\n\n # Call rest of MD steps...\n #create static masks for each image\n staticMask.createStaticMask(imgObjList, configobj,\n procSteps=procSteps)\n\n #subtract the sky\n sky.subtractSky(imgObjList, configobj, procSteps=procSteps)\n\n# _dbg_dump_virtual_outputs(imgObjList)\n\n #drizzle to separate images\n adrizzle.drizSeparate(imgObjList, outwcs, configobj, wcsmap=wcsmap,\n procSteps=procSteps)\n\n# _dbg_dump_virtual_outputs(imgObjList)\n\n #create the median images from the driz sep images\n createMedian.createMedian(imgObjList, configobj, procSteps=procSteps)\n\n #blot the images back to the original reference frame\n ablot.runBlot(imgObjList, outwcs, configobj, wcsmap=wcsmap,\n procSteps=procSteps)\n\n #look for cosmic rays\n drizCR.rundrizCR(imgObjList, configobj, procSteps=procSteps)\n\n #Make your final drizzled image\n adrizzle.drizFinal(imgObjList, outwcs, configobj, wcsmap=wcsmap,\n procSteps=procSteps)\n\n print()\n print(\"AstroDrizzle Version {:s} is finished processing at {:s}.\\n\"\n .format(__version__, util._ptime()[0]))\n\n except:\n clean = False\n print(textutil.textbox(\n \"ERROR:\\nAstroDrizzle Version {:s} encountered a problem! \"\n \"Processing terminated at {:s}.\"\n .format(__version__, util._ptime()[0])), file=sys.stderr)\n raise\n\n finally:\n procSteps.reportTimes()\n if imgObjList:\n for image in imgObjList:\n if clean:\n image.clean()\n image.close()\n del imgObjList\n del outwcs"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ndump some helpful information about virtual outputs", "response": "def _dbg_dump_virtual_outputs(imgObjList):\n \"\"\" dump some helpful information. 
strictly for debugging \"\"\"\n global _fidx\n tag = 'virtual'\n log.info((tag+' ')*7)\n for iii in imgObjList:\n log.info('-'*80)\n log.info(tag+' orig nm: '+iii._original_file_name)\n log.info(tag+' names.data: '+str(iii.outputNames[\"data\"]))\n log.info(tag+' names.orig: '+str(iii.outputNames[\"origFilename\"]))\n log.info(tag+' id: '+str(id(iii)))\n log.info(tag+' in.mem: '+str(iii.inmemory))\n log.info(tag+' vo items...')\n for vok in sorted(iii.virtualOutputs.keys()):\n FITSOBJ = iii.virtualOutputs[vok]\n log.info(tag+': '+str(vok)+' = '+str(FITSOBJ))\n if vok.endswith('.fits'):\n if not hasattr(FITSOBJ, 'data'):\n FITSOBJ = FITSOBJ[0] # list of PrimaryHDU ?\n if not hasattr(FITSOBJ, 'data'):\n FITSOBJ = FITSOBJ[0] # was list of HDUList ?\n dbgname = 'DEBUG_%02d_'%(_fidx,)\n dbgname+=os.path.basename(vok)\n _fidx+=1\n FITSOBJ.writeto(dbgname)\n log.info(tag+' wrote: '+dbgname)\n log.info('\\n'+vok)\n if hasattr(FITSOBJ, 'data'):\n log.info(str(FITSOBJ._summary()))\n log.info('min and max are: '+str( (FITSOBJ.data.min(),\n FITSOBJ.data.max()) ))\n log.info('avg and sum are: '+str( (FITSOBJ.data.mean(),\n FITSOBJ.data.sum()) ))\n# log.info(str(FITSOBJ.data)[:75])\n else:\n log.info(vok+' has no .data attr')\n log.info(str(type(FITSOBJ)))\n log.info(vok+'\\n')\n log.info('-'*80)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the dark current for the WFC3 UVIS detector.", "response": "def getdarkcurrent(self,chip):\n \"\"\"\n Return the dark current for the WFC3 UVIS detector. 
This value\n will be contained within an instrument specific keyword.\n\n Returns\n -------\n darkcurrent: float\n The dark current value with **units of electrons**.\n \"\"\"\n darkcurrent = 0.\n\n try:\n darkcurrent = self._image[self.scienceExt, chip].header['MEANDARK']\n\n except:\n msg = \"#############################################\\n\"\n msg += \"# #\\n\"\n msg += \"# Error: #\\n\"\n msg += \"# Cannot find the value for 'MEANDARK' #\\n\"\n msg += \"# in the image header. WFC3 input images #\\n\"\n msg += \"# are expected to have this header #\\n\"\n msg += \"# keyword. #\\n\"\n msg += \"# #\\n\"\n msg += \"# Error occured in WFC3UVISInputImage class #\\n\"\n msg += \"# #\\n\"\n msg += \"#############################################\\n\"\n raise ValueError(msg)\n\n return darkcurrent"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef getdarkimg(self,chip):\n sci_chip = self._image[self.scienceExt,chip]\n\n # First attempt to get the dark image specified by the \"DARKFILE\"\n # keyword in the primary keyword of the science data.\n try:\n filename = self.header[\"DARKFILE\"]\n handle = fileutil.openImage(filename, mode='readonly', memmap=False)\n hdu = fileutil.getExtn(handle,extn=\"sci,1\")\n darkobj = hdu.data[sci_chip.ltv2:sci_chip.size2,sci_chip.ltv1:sci_chip.size1]\n\n # If the darkfile cannot be located, create the dark image from\n # what we know about the detector dark current and assume a\n # constant dark current for the whole image.\n except:\n darkobj = (np.ones(sci_chip.image_shape,\n dtype=sci_chip.image_dtype) *\n self.getdarkcurrent())\n return darkobj", "response": "Returns an array representing the dark image for the detector."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ngetting the sky image for the detector.", "response": "def getskyimg(self,chip):\n \"\"\"\n Notes\n =====\n Return an array representing the sky image for the detector. 
The value\n of the sky is what would actually be subtracted from the exposure by\n the skysub step.\n\n :units: electrons\n\n \"\"\"\n sci_chip = self._image[self.scienceExt,chip]\n skyimg = np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.subtractedSky\n if sci_chip._conversionFactor != 1.0: # If units are not already ELECTRONS\n skyimg *= self.getexptimeimg(chip)\n return skyimg"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getdarkcurrent(self,extver):\n darkcurrent=0.\n try:\n darkcurrent = self._image[self.scienceExt,extver].header['MEANDARK']\n except:\n str = \"#############################################\\n\"\n str += \"# #\\n\"\n str += \"# Error: #\\n\"\n str += \"# Cannot find the value for 'MEANDARK' #\\n\"\n str += \"# in the image header. ACS input images #\\n\"\n str += \"# are expected to have this header #\\n\"\n str += \"# keyword. #\\n\"\n str += \"# #\\n\"\n str += \"# Error occured in the ACSInputImage class #\\n\"\n str += \"# #\\n\"\n str += \"#############################################\\n\"\n raise ValueError(str)\n\n return darkcurrent", "response": "Returns the dark current value for the ACS detector."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nset the instrument parameters.", "response": "def setInstrumentParameters(self,instrpars):\n \"\"\" Sets the instrument parameters.\n \"\"\"\n pri_header = self._image[0].header\n\n if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):\n instrpars['gnkeyword'] = None\n if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):\n instrpars['rnkeyword'] = None\n if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):\n instrpars['expkeyword'] = 'EXPTIME'\n\n # We need to treat Read Noise and Gain as a special case since it is\n # not populated in the SBC primary header for the MAMA\n for chip in self.returnAllChips(extname=self.scienceExt):\n chip._gain = 1.0 
#self.getInstrParameter(\"\", pri_header,\n # instrpars['gnkeyword'])\n chip._rdnoise = 0.0 #self.getInstrParameter(\"\", pri_header,\n # instrpars['rnkeyword'])\n chip._exptime = self.getInstrParameter(instrpars['exptime'], pri_header,\n instrpars['expkeyword'])\n if chip._exptime is None:\n print('ERROR: invalid instrument task parameter')\n raise ValueError\n\n # We need to determine if the user has used the default readnoise/gain value\n # since if not, they will need to supply a gain/readnoise value as well\n usingDefaultGain = instrpars['gnkeyword'] is None\n usingDefaultReadnoise = instrpars['rnkeyword'] is None\n\n # Set the default readnoise or gain values based upon the amount of user input given.\n\n # Case 1: User supplied no gain or readnoise information\n if usingDefaultReadnoise and usingDefaultGain:\n # Set the default gain and readnoise values\n self._setSBCchippars()\n # Case 2: The user has supplied a value for gain\n elif usingDefaultReadnoise and not usingDefaultGain:\n # Set the default readnoise value\n self._setDefaultSBCReadnoise()\n # Case 3: The user has supplied a value for readnoise\n elif not usingDefaultReadnoise and usingDefaultGain:\n # Set the default gain value\n self._setDefaultSBCGain()\n else:\n # In this case, the user has specified both a gain and readnoise values. Just use them as is.\n pass"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating a median array that rejects the highest pixel and computes the lowest valid pixel after mask application.", "response": "def min_med(images, weight_images, readnoise_list, exptime_list,\n background_values, weight_masks=None, combine_grow=1,\n combine_nsigma1=4, combine_nsigma2=3, fillval=False):\n \"\"\" Create a median array, rejecting the highest pixel and\n computing the lowest valid pixel after mask application.\n\n .. 
note::\n In this version of the mimmed algorithm we assume that the units of\n all input data is electons.\n\n Parameters\n ----------\n images : list of numpy.ndarray\n List of input data to be combined.\n\n weight_images : list of numpy.ndarray\n List of input data weight images to be combined.\n\n readnoise_list : list\n List of readnoise values to use for the input images.\n\n exptime_list : list\n List of exposure times to use for the input images.\n\n background_values : list\n List of image background values to use for the input images.\n\n weight_masks : list of numpy.ndarray, None\n List of imput data weight masks to use for pixel rejection.\n (Default: `None`)\n\n combine_grow : int\n Radius (pixels) for neighbor rejection. (Default: 1)\n\n combine_nsigma1 : float\n Significance for accepting minimum instead of median. (Default: 4)\n\n combine_nsigma2 : float\n Significance for accepting minimum instead of median. (Default: 3)\n\n fillval : bool\n Turn on use of imedian/imean. 
(Default: `False`)\n\n Returns\n -------\n combined_array : numpy.ndarray\n Combined array.\n\n \"\"\"\n # In this case we want to calculate two things:\n # 1) the median array, rejecting the highest pixel (thus running\n # imcombine with nlow=0, nhigh=1, nkeep=1, using the masks)\n # 2) the lowest valid pixel after applying the masks (thus running\n # imcombine with nlow=0, nhigh=3, nkeep=1, using the masks)\n #\n # We also calculate the sum of the weight files (to produce the total\n # effective exposure time for each pixel).\n #\n # The total effective background in the final image is calculated as\n # follows:\n # - convert background for each input image to counts/s\n # (divide by exptime)\n # - multiply this value by the weight image, to obtain the effective\n # background counts (in DN) for each pixel, for each image\n # - Add these images together, to obtain the total effective background\n # for the combined image.\n #\n # Once we've made these two files, then calculate the SNR based on the\n # median-pixel image, and compare with the minimum.\n\n nimages = len(images)\n combtype_median = 'imedian' if fillval else 'median'\n images = np.asarray(images)\n weight_images = np.asarray(weight_images)\n\n if weight_masks == [] or weight_masks is None:\n weight_masks = None\n mask_sum = np.zeros(images.shape[1:], dtype=np.int16)\n all_bad_idx = np.array([], dtype=np.int)\n all_bad_idy = np.array([], dtype=np.int)\n else:\n weight_masks = np.asarray(weight_masks, dtype=np.bool)\n mask_sum = np.sum(weight_masks, axis=0, dtype=np.int16)\n all_bad_idx, all_bad_idy = np.where(mask_sum == nimages)\n\n # Create a different median image based upon the number of images in the\n # input list.\n if nimages == 2:\n median_file = num_combine(\n images,\n masks=weight_masks,\n combination_type='imean' if fillval else 'mean',\n nlow=0, nhigh=0, lower=None, upper=None\n )\n\n else:\n # The value of NHIGH=1 will cause problems when there is only 1 valid\n # unmasked input image 
for that pixel due to a difference in behavior\n # between 'num_combine' and 'iraf.imcombine'.\n # This value may need to be adjusted on the fly based on the number of\n # inputs and the number of masked values/pixel.\n #\n median_file = num_combine(\n images,\n masks=weight_masks,\n combination_type=combtype_median,\n nlow=0, nhigh=1, lower=None, upper=None\n )\n\n # The following section of code will address the problem caused by\n # having a value of nhigh = 1. This will behave in a way similar to\n # the way the IRAF task IMCOMBINE behaves. In order to accomplish\n # this, the following procedure will be followed:\n # 1) The input masks will be summed.\n # 2) The science data will be summed.\n # 3) In the locations of the summed mask where the sum is 1 less than\n # the total number of images, the value of that location in the\n # summed science image will be used to replace the existing value\n # in the existing median_file.\n #\n # This procedure is being used to prevent too much data from being\n # thrown out of the image. Take for example the case of 3 input images.\n # In two of the images the pixel locations have been masked out.\n # Now, if nhigh is applied there will be no value to use for that\n # position. However, if this new procedure is used that value in\n # the resulting images will be the value that was rejected by the\n # nhigh rejection step.\n\n # We need to make certain that \"bad\" pixels in the sci data are set to\n # 0. That way, when the sci images are summed, the value of the sum\n # will only come from the \"good\" pixels.\n if weight_masks is None:\n sci_sum = np.sum(images, axis=0)\n if nimages == 1:\n median_file = sci_sum\n\n else:\n sci_sum = np.sum(images * np.logical_not(weight_masks), axis=0)\n # Use the summed sci image values in locations where the mask_sum\n # indicates that there is only 1 good pixel to use. 
The value will\n # be used in the median_file image\n idx = np.where(mask_sum == (nimages - 1))\n median_file[idx] = sci_sum[idx]\n\n # Create the minimum image from the stack of input images.\n if weight_masks is not None:\n # make a copy of images to avoid side-effect of modifying input\n # argument:\n images = images.copy()\n images[weight_masks] = np.nan\n images[:, all_bad_idx, all_bad_idy] = 0\n minimum_file = np.nanmin(images, axis=0)\n else:\n minimum_file = np.amin(images, axis=0)\n\n # Scale the weight images by the background values and add them to the bk\n # Create an image of the total effective background (in DN) per pixel:\n # (which is the sum of all the background-scaled weight files)\n s = np.asarray([bv / et for bv, et in\n zip(background_values, exptime_list)])\n bkgd_file = np.sum(weight_images * s[:, None, None], axis=0)\n\n # Scale the weight mask images by the square of the readnoise values.\n # Create an image of the total readnoise**2 per pixel\n # (which is the sum of all the input readnoise values).\n if weight_masks is None:\n rdn2 = sum((r**2 for r in readnoise_list))\n readnoise_file = rdn2 * np.ones_like(images[0])\n\n else:\n readnoise_file = np.sum(\n np.logical_not(weight_masks) *\n (np.asarray(readnoise_list)**2)[:, None, None],\n axis=0\n )\n\n # Create an image of the total effective exposure time per pixel:\n # (which is simply the sum of all the drizzle output weight files)\n weight_file = np.sum(weight_images, axis=0)\n\n # Scale up both the median and minimum arrays by the total effective\n # exposure time per pixel.\n minimum_file_weighted = minimum_file * weight_file\n median_file_weighted = median_file * weight_file\n del weight_file\n\n # Calculate the 1-sigma r.m.s.:\n # variance = median_electrons + bkgd_electrons + readnoise**2\n # rms = sqrt(variance)\n # This image has units of electrons.\n #\n # make this the abs value so that negative numbers dont throw an exception?\n rms_file2 = np.fmax(\n median_file_weighted 
+ bkgd_file + readnoise_file,\n np.zeros_like(median_file_weighted)\n )\n rms_file = np.sqrt(rms_file2)\n del bkgd_file, readnoise_file\n\n # For the median array, calculate the n-sigma lower threshold to the array\n # and incorporate that into the pixel values.\n median_rms_file = median_file_weighted - rms_file * combine_nsigma1\n\n if combine_grow != 0:\n # Do a more sophisticated rejection: For all cases where the minimum\n # pixel will be accepted instead of the median, set a lower threshold\n # for that pixel and the ones around it (ie become less conservative\n # in rejecting the median). This is because in cases of\n # triple-incidence cosmic rays, quite often the low-lying outliers\n # of the CRs can influence the median for the initial relatively high\n # value of sigma, so a lower threshold must be used to mnake sure that\n # the minimum is selected.\n #\n # This is done as follows:\n # 1) make an image which is zero everywhere except where the minimum\n # will be accepted\n # 2) box-car smooth this image, to make these regions grow.\n # 3) In the file \"median_rms_file_electrons\", replace these pixels\n # by median - combine_nsigma2 * rms\n #\n # Then use this image in the final replacement, in the same way as for\n # the case where this option is not selected.\n minimum_flag_file = np.less(minimum_file_weighted,\n median_rms_file).astype(np.float64)\n\n # The box size value must be an integer. This is not a problem since\n # __combine_grow should always be an integer type. The combine_grow\n # column in the MDRIZTAB should also be an integer type.\n boxsize = int(2 * combine_grow + 1)\n boxshape = (boxsize, boxsize)\n minimum_grow_file = np.zeros_like(images[0])\n\n # If the boxcar convolution has failed it is potentially for\n # two reasons:\n # 1) The kernel size for the boxcar is bigger than the actual image.\n # 2) The grow parameter was specified with a value < 0. This would\n # result in an illegal boxshape kernel. 
The dimensions of the\n # kernel box *MUST* be integer and greater than zero.\n #\n # If the boxcar convolution has failed, try to give a meaningfull\n # explanation as to why based upon the conditionals described above.\n if boxsize <= 0:\n errormsg1 = \"############################################################\\n\"\n errormsg1 += \"# The boxcar convolution in minmed has failed. The 'grow' #\\n\"\n errormsg1 += \"# parameter must be greater than or equal to zero. You #\\n\"\n errormsg1 += \"# specified an input value for the 'grow' parameter of: #\\n\"\n errormsg1 += \" combine_grow: \" + str(combine_grow)+'\\n'\n errormsg1 += \"############################################################\\n\"\n raise ValueError(errormsg1)\n\n if boxsize > images.shape[1]:\n errormsg2 = \"############################################################\\n\"\n errormsg2 += \"# The boxcar convolution in minmed has failed. The 'grow' #\\n\"\n errormsg2 += \"# parameter specified has resulted in a boxcar kernel that #\\n\"\n errormsg2 += \"# has dimensions larger than the actual image. 
You #\\n\"\n errormsg2 += \"# specified an input value for the 'grow' parameter of: #\\n\"\n errormsg2 += \" combine_grow: \" + str(combine_grow) + '\\n'\n errormsg2 += \"############################################################\\n\"\n print(images.shape[1:])\n raise ValueError(errormsg2)\n\n # Attempt the boxcar convolution using the boxshape based upon the user\n # input value of \"grow\"\n ker = np.ones((boxsize, boxsize)) / float(boxsize**2)\n minimum_grow_file = signal.convolve2d(minimum_flag_file, ker,\n boundary='fill', mode='same')\n\n median_rms_file = np.where(\n np.equal(minimum_grow_file, 0),\n median_file_weighted - rms_file * combine_nsigma1,\n median_file_weighted - rms_file * combine_nsigma2\n )\n del rms_file, minimum_grow_file\n\n # Finally decide whether to use the minimim or the median (in counts/s),\n # based on whether the median is more than 3 sigma above the minimum.\n combined_array = np.where(\n np.less(minimum_file_weighted, median_rms_file),\n minimum_file,\n median_file\n )\n # Set fill regions to a pixel value of 0.\n combined_array[all_bad_idx, all_bad_idy] = 0\n\n return combined_array"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nsums a list of numarray objects.", "response": "def _sumImages(self,numarrayObjectList):\n \"\"\" Sum a list of numarray objects. 
\"\"\"\n if numarrayObjectList in [None, []]:\n return None\n\n tsum = np.zeros(numarrayObjectList[0].shape, dtype=numarrayObjectList[0].dtype)\n\n for image in numarrayObjectList:\n tsum += image\n\n return tsum"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef moments(data,cntr):\n total = data.sum()\n #X, Y = np.indices(data.shape)\n #x = (X*data).sum()/total\n #y = (Y*data).sum()/total\n x,y = cntr\n xi = int(x)\n yi = int(y)\n if xi < 0 or xi >= data.shape[1] or yi < 0 or yi >= data.shape[0]:\n raise ValueError\n col = data[:, xi]\n width_x = np.sqrt(abs(((np.arange(col.size)-y)**2*col).sum()/col.sum()))\n row = data[yi, :]\n width_y = np.sqrt(abs(((np.arange(row.size)-x)**2*row).sum()/row.sum()))\n height = data.max()\n return height, x, y, width_x, width_y", "response": "Returns the height x y width_x width_y the gaussian parameters of a 2D distribution by calculating its\n moments."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nremove sources which are within nsigma*fwhm/2 pixels of each other, leaving only a single valid source in that region. This algorithm only works for sources which end up sequentially next to each other based on Y position and removes enough duplicates to make the final source list more managable. It sorts the positions by Y value in order to group those at the same positions as much as possible.", "response": "def apply_nsigma_separation(fitind,fluxes,separation,niter=10):\n \"\"\"\n Remove sources which are within nsigma*fwhm/2 pixels of each other, leaving\n only a single valid source in that region.\n\n This algorithm only works for sources which end up sequentially next to each other\n based on Y position and removes enough duplicates to make the final source list more\n managable. 
It sorts the positions by Y value in order to group those at the\n same positions as much as possible.\n \"\"\"\n for n in range(niter):\n if len(fitind) < 1:\n break\n fitarr = np.array(fitind,np.float32)\n fluxarr = np.array(fluxes,np.float32)\n inpind = np.argsort(fitarr[:,1])\n npind = fitarr[inpind]\n fluxind = fluxarr[inpind]\n fitind = npind.tolist()\n fluxes = fluxind.tolist()\n dx = npind[1:,0] - npind[:-1,0]\n dy = npind[1:,1] - npind[:-1,1]\n dr = np.sqrt(np.power(dx,2)+np.power(dy,2))\n nsame = np.where(dr <= separation)[0]\n if nsame.shape[0] > 0:\n for ind in nsame[-1::-1]:\n #continue # <- turn off filtering by source separation\n del fitind[ind]\n del fluxes[ind]\n else:\n break\n return fitind,fluxes"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef xy_round(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin=None,datamax=None):\n nyk,nxk = ker2d.shape\n if datamin is None:\n datamin = data.min()\n if datamax is None:\n datamax = data.max()\n\n # call C function for speed now...\n xy_val = cdriz.arrxyround(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin,datamax)\n if xy_val is None:\n x = None\n y = None\n round = None\n else:\n x = xy_val[0]\n y = xy_val[1]\n round = xy_val[2]\n\n return x,y,round", "response": "Compute center of source\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk,\n datamin, datamax):\n \"\"\"\n sharp_round -- Compute first estimate of the roundness and sharpness of the\n detected objects.\n\n A Python translation of the AP_SHARP_ROUND IRAF/DAOFIND function.\n \"\"\"\n\n # Compute the first estimate of roundness:\n sum2 = np.sum(s2m*density)\n sum4 = np.sum(s4m*abs(density))\n if sum2 == 0.0:\n round = 0.0\n elif sum4 <= 0.0: # eps?\n round = None\n else:\n round = 2.0 * sum2 / sum4\n\n # Eliminate the sharpness test if the central pixel 
is bad:\n mid_data_pix = data[yc, xc]\n mid_dens_pix = density[yc, xc]\n if mid_data_pix > datamax:\n return True, round, None\n if mid_data_pix < datamin:\n return False, round, None\n\n ########################\n # Sharpness statistics:\n\n satur = np.max(kskip*data) > datamax\n\n # Exclude pixels (create a mask) outside the [datamin, datamax] range:\n uskip = np.where((data >= datamin) & (data <= datamax), 1, 0)\n # Update the mask with the \"skipped\" values from the convolution kernel:\n uskip *= kskip\n # Also, exclude central pixel:\n uskip[yc, xc] = 0\n\n npixels = np.sum(uskip)\n if (npixels < 1 or mid_dens_pix <= 0.0):\n return satur, round, None\n\n sharp = (mid_data_pix - np.sum(uskip*data)/npixels) / mid_dens_pix\n #sharp = (mid_data_pix - np.mean(uskip*data)) / mid_dens_pix\n\n return satur, round, sharp", "response": "Compute the first estimate of the sharpness of the object."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the roundness of the image.", "response": "def roundness(im):\n \"\"\"\n from astropy.io import fits as pyfits\n data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)\n star0=data[403:412,423:432]\n star=data[396:432,3522:3558]\n In [53]: findobj.roundness(star0)\n Out[53]: 0.99401955054989544\n In [54]: findobj.roundness(star)\n Out[54]: 0.83091919980660645\n\n \"\"\"\n perimeter = im.shape[0]*2 +im.shape[1]*2 -4\n area = im.size\n return 4*np.pi*area/perimeter**2"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncompute the immoments of a single image.", "response": "def immoments(im, p,q):\n x = list(range(im.shape[1]))\n y = list(range(im.shape[0]))\n #coord=np.array([x.flatten(),y.flatten()]).T\n \"\"\"\n moment = 0\n momentx = 0\n for i in x.flatten():\n moment+=momentx\n sumx=0\n for j in y.flatten():\n sumx+=i**0*j**0*star0[i,j]\n \"\"\"\n moment = np.sum([i**p*j**q*im[i,j] for j in x for i in y], dtype=np.float64)\n return moment"} {"SOURCE": "codesearchnet", 
"instruction": "Explain what the following Python 3 code does\ndef centroid(im):\n # These calls point to Python version of moments function\n m00 = cdriz.arrmoments(im,0,0)\n m10 = cdriz.arrmoments(im, 1,0)\n m01 = cdriz.arrmoments(im,0,1)\n\n ycen = m10 / m00\n xcen = m01 / m00\n return xcen, ycen", "response": "Computes the centroid of an image using the image moments"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting the entry in MDRIZTAB where task parameters live.", "response": "def getMdriztabParameters(files):\n \"\"\" Gets entry in MDRIZTAB where task parameters live.\n This method returns a record array mapping the selected\n row.\n \"\"\"\n\n # Get the MDRIZTAB table file name from the primary header.\n # It is gotten from the first file in the input list. No\n # consistency checks are performed.\n _fileName = files[0]\n _header = fileutil.getHeader(_fileName)\n if 'MDRIZTAB' in _header:\n _tableName = _header['MDRIZTAB']\n else:\n raise KeyError(\"No MDRIZTAB found in file \" + _fileName)\n\n _tableName = fileutil.osfn(_tableName)\n\n # Now get the filters from the primary header.\n _filters = fileutil.getFilterNames(_header)\n\n # Specifically check to see whether the MDRIZTAB file can be found\n mtab_path = os.path.split(_tableName)[0] # protect against no path given for _tableName\n if mtab_path and not os.path.exists(mtab_path): # check path first, if given\n raise IOError(\"Directory for MDRIZTAB '%s' could not be accessed!\"%mtab_path)\n if not os.path.exists(_tableName): # then check for the table itself\n raise IOError(\"MDRIZTAB table '%s' could not be found!\"%_tableName)\n\n # Open MDRIZTAB file.\n try:\n _mdriztab = fits.open(_tableName, memmap=False)\n except:\n raise IOError(\"MDRIZTAB table '%s' not valid!\" % _tableName)\n\n # Look for matching rows based on filter name. 
If no\n # match, pick up rows for the default filter.\n _rows = _getRowsByFilter(_mdriztab, _filters)\n if _rows == []:\n _rows = _getRowsByFilter(_mdriztab, 'ANY')\n\n # Now look for the row that matches the number of images.\n # The logic below assumes that rows for a given filter\n # are arranged in ascending order of the 'numimage' field.\n _nimages = len(files)\n _row = 0\n for i in _rows:\n _numimages = _mdriztab[1].data.field('numimages')[i]\n if _nimages >= _numimages:\n _row = i\n print('- MDRIZTAB: AstroDrizzle parameters read from row %s.'%(_row+1))\n\n mpars = _mdriztab[1].data[_row]\n _mdriztab.close()\n\n interpreted = _interpretMdriztabPars(mpars)\n\n if \"staticfile\" in interpreted:\n interpreted.pop(\"staticfile\")\n\n return interpreted"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ninterpreting the parameters in the MDRIZTAB record and update the master parameters list with those values.", "response": "def _interpretMdriztabPars(rec):\n \"\"\"\n Collect task parameters from the MDRIZTAB record and\n update the master parameters list with those values\n\n Note that parameters read from the MDRIZTAB record must\n be cleaned up in a similar way that parameters read\n from the user interface are.\n \"\"\"\n tabdict = {}\n # for each entry in the record...\n for indx in range(len(rec.array.names)):\n # ... 
get the name, format, and value.\n _name = rec.array.names[indx]\n _format = rec.array.formats[indx]\n _value = rec.field(_name)\n\n # Translate names from MDRIZTAB columns names to\n # input parameter names found in IRAF par file.\n #\n #if _name.find('final') > -1: _name = 'driz_'+_name\n if _name in ['shiftfile','mdriztab']:\n continue\n drizstep_names = ['driz_sep_','final_']\n if _name in ['refimage','bits']:\n for dnames in drizstep_names:\n tabdict[dnames+_name] = _value\n continue\n if _name in ['driz_sep_bits','final_bits']:\n tabdict[_name] = str(_value)\n continue\n if _name == 'coeffs':\n _val = True\n if _value in ['INDEF',None,\"None\",'',' ']: _val = False\n tabdict[_name] = _val\n continue\n\n par_table = {'subsky':'skysub','crbitval':'crbit','readnoise':'rdnoise'}\n if _name in par_table:\n _name = par_table[_name]\n\n # We do not care about the first two columns at this point\n # as they are only used for selecting the rows\n if _name != 'filter' and _name != 'numimages':\n # start by determining the format type of the parameter\n _fmt = findFormat(_format)\n\n # Based on format type, apply proper conversion/cleaning\n if (_fmt == 'a') or (_fmt == 'A'):\n _val = cleanBlank(_value)\n if _val is None:\n _val = ''\n\n elif (_format == 'i1') or (_format=='1L'):\n _val = toBoolean(_value)\n\n elif (_format == 'i4') or (_format == '1J'):\n _val = cleanInt(_value)\n\n elif ('E' in _format) or (_format == 'f4') :\n _val = cleanNaN(_value)\n\n else:\n print('MDRIZTAB column ',_name,' has unrecognized format',_format)\n raise ValueError\n if _name in ['ra','dec']:\n for dnames in drizstep_names:\n tabdict[dnames+_name] = _val\n\n else:\n tabdict[_name] = _val\n\n return tabdict"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ninterpret parameters from TEAL configObj interface as set interactively by the user and build a new WCS instance.", "response": "def run(configObj,wcsmap=None):\n \"\"\" Interpret parameters 
from TEAL/configObj interface as set interactively\n by the user and build the new WCS instance\n \"\"\"\n\n distortion_pars = configObj['Distortion Model']\n\n outwcs = build(configObj['outwcs'], configObj['wcsname'],\n configObj['refimage'], undistort = configObj['undistort'],\n usecoeffs=distortion_pars['applycoeffs'], coeffsfile=distortion_pars['coeffsfile'],\n **configObj['User WCS Parameters'])"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a new WCS instance from a reference image and a user supplied parameters.", "response": "def build(outname, wcsname, refimage, undistort=False,\n applycoeffs=False, coeffsfile=None, **wcspars):\n \"\"\" Core functionality to create a WCS instance from a reference image WCS,\n user supplied parameters or user adjusted reference WCS.\n The distortion information can either be read in as part of the reference\n image WCS or given in 'coeffsfile'.\n\n Parameters\n ----------\n outname : string\n filename of output WCS\n wcsname : string\n WCSNAME ID for generated WCS\n refimage : string\n filename of image with source WCS used as basis for output WCS\n undistort : bool\n Create an undistorted WCS?\n applycoeffs : bool\n Apply coefficients from refimage to generate undistorted WCS?\n coeffsfile : string\n If specified, read distortion coeffs from separate file\n\n\n \"\"\"\n\n # Insure that the User WCS parameters have values for all the parameters,\n # even if that value is 'None'\n user_wcs_pars = convert_user_pars(wcspars)\n userwcs = wcspars['userwcs']\n\n \"\"\"\n Use cases to document the logic required to interpret the parameters\n\n WCS generation based on refimage/userwcs parameters\n -------------------------------------------------------------\n refimage == None, userwcs == False:\n *NO WCS specified*\n => print a WARNING message and return without doing anything\n refimage == None, userwcs == True:\n => Create WCS without a distortion model entirely from user 
parameters*\n refimage != None, userwcs == False:\n => No user WCS parameters specified\n => Simply use refimage WCS as specified\n refimage != None, userwcs == True:\n => Update refimage WCS with user specified values*\n\n Apply distortion and generate final headerlet using processed WCS\n -----------------------------------------------------------------\n refimage == None, userwcs == True:\n *Output WCS generated entirely from user supplied parameters*\n Case 1: applycoeffs == False, undistort == True/False (ignored)\n => no distortion model to interpret\n => generate undistorted headerlet with no distortion model\n Case 2: applycoeffs == True/False, undistort == True\n => ignore any user specified distortion model\n => generate undistorted headerlet with no distortion model\n Case 3: applycoeffs == True, undistort == False\n => WCS from scratch combined with distortion model from another image\n => generate headerlet with distortion model\n\n refimage != None, userwcs == True/False:\n *Output WCS generated from reference image possibly modified by user parameters*\n Case 4: applycoeffs == False, undistort == True\n => If refimage has distortion, remove it\n => generate undistorted headerlet with no distortion model\n Case 5: applycoeffs == False, undistort == False\n => Leave refimage distortion model (if any) unmodified\n => generate a headerlet using same distortion model (if any) as refimage\n Case 6: applycoeffs == True, undistort == False\n => Update refimage with distortion model with user-specified model\n => generate a headerlet with a distortion model\n Case 7: applycoeffs == True, undistort == True\n => ignore user specified distortion model and undistort WCS\n => generate a headerlet without a distortion model\n \"\"\"\n ### Build WCS from refimage and/or user pars\n if util.is_blank(refimage) and not userwcs:\n print('WARNING: No WCS specified... 
No WCS created!')\n return\n customwcs = None\n if util.is_blank(refimage) and userwcs:\n # create HSTWCS object from user parameters\n complete_wcs = True\n for key in user_wcs_pars:\n if util.is_blank(user_wcs_pars[key]):\n complete_wcs = False\n break\n if complete_wcs:\n customwcs = wcs_functions.build_hstwcs(user_wcs_pars['crval1'],user_wcs_pars['crval2'],\n user_wcs_pars['crpix1'],user_wcs_pars['crpix2'],\n user_wcs_pars['naxis1'],user_wcs_pars['naxis2'],\n user_wcs_pars['pscale'],user_wcs_pars['orientat'])\n else:\n print('WARNING: Not enough WCS information provided by user!')\n raise ValueError\n\n if not util.is_blank(refimage):\n refwcs = stwcs.wcsutil.HSTWCS(refimage)\n else:\n refwcs = customwcs\n\n ### Apply distortion model (if any) to update WCS\n if applycoeffs and not util.is_blank(coeffsfile):\n if not util.is_blank(refimage):\n replace_model(refwcs, coeffsfile)\n else:\n if not undistort:\n add_model(refwcs,coeffsfile)\n # Only working with custom WCS from user, no distortion\n # so apply model to WCS, including modifying the CD matrix\n apply_model(refwcs)\n\n ### Create undistorted WCS, if requested\n if undistort:\n outwcs = undistortWCS(refwcs)\n else:\n outwcs = refwcs\n\n if userwcs:\n # replace (some/all?) 
WCS values from refimage with user WCS values\n # by running 'updatewcs' functions on input WCS\n outwcs = mergewcs(outwcs,customwcs,user_wcs_pars)\n\n\n ### Create the final headerlet and write it out, if specified\n if not util.is_blank(refimage):\n template = refimage\n elif not util.is_blank(coeffsfile):\n template = coeffsfile\n else:\n template = None\n\n\n # create default WCSNAME if None was given\n wcsname = create_WCSname(wcsname)\n print('Creating final headerlet with name ',wcsname,' using template ',template)\n outhdr = generate_headerlet(outwcs,template,wcsname,outname=outname)\n\n # synchronize this new WCS with the rest of the chips in the image\n for ext in outhdr:\n if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':\n ext_wcs = wcsutil.HSTWCS(ext)\n stwcs.updatewcs.makewcs.MakeWCS.updateWCS(ext_wcs,outwcs)\n\n return outwcs"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_WCSname(wcsname):\n if util.is_blank(wcsname):\n ptime = fileutil.getDate()\n wcsname = \"User_\"+ptime\n\n return wcsname", "response": "Verify that a valid WCSNAME has been provided and create a new one based on current date."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef convert_user_pars(wcspars):\n default_pars = default_user_wcs.copy()\n for kw in user_hstwcs_pars:\n default_pars[user_hstwcs_pars[kw]] = wcspars[kw]\n return default_pars", "response": "Convert the parameters provided by the configObj into the corresponding\nCTYPE parameters."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef mergewcs(outwcs, customwcs, wcspars):\n # start by working on a copy of the refwcs\n if outwcs.sip is not None:\n wcslin = stwcs.distortion.utils.undistortWCS(outwcs)\n outwcs.wcs.cd = wcslin.wcs.cd\n outwcs.wcs.set()\n outwcs.setOrient()\n outwcs.setPscale()\n else:\n wcslin = outwcs\n if 
customwcs is None:\n # update valid pars from wcspars\n if wcspars['crval1'] is not None:\n outwcs.wcs.crval = np.array([wcspars['crval1'],wcspars['crval2']])\n if wcspars['crpix1'] is not None:\n outwcs.wcs.crpix = np.array([wcspars['crpix1'],wcspars['crpix2']])\n if wcspars['naxis1'] is not None:\n outwcs.pixel_shape = (wcspars['naxis1'], wcspars['naxis2'])\n outwcs.wcs.crpix = np.array(outwcs.pixel_shape) / 2.0\n\n pscale = wcspars['pscale']\n orient = wcspars['orientat']\n if pscale is not None or orient is not None:\n if pscale is None: pscale = wcslin.pscale\n if orient is None: orient = wcslin.orientat\n pix_ratio = pscale/wcslin.pscale\n delta_rot = wcslin.orientat - orient\n delta_rot_mat = fileutil.buildRotMatrix(delta_rot)\n outwcs.wcs.cd = np.dot(outwcs.wcs.cd,delta_rot_mat)*pix_ratio\n # apply model to new linear CD matrix\n apply_model(outwcs)\n else:\n # A new fully described WCS was provided in customwcs\n outwcs.wcs.cd = customwcs.wcs.cd\n outwcs.wcs.crval = customwcs.wcs.crval\n outwcs.wcs.crpix = customwcs.wcs.crpix\n outwcs.pixel_shape = customwcs.pixel_shape\n return outwcs", "response": "Merge the WCS keywords from user specified values into a full HSTWCS object."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef add_model(refwcs, newcoeffs):\n # Update refwcs with distortion model\n for kw in model_attrs:\n if newcoeffs.__dict__[key] is not None:\n refwcs.__dict__[key] = newcoeffs.__dict__[key]", "response": "Add distortion model to existing HSTWCS object"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\napply distortion model to WCS including modifying CD with linear distortion terms", "response": "def apply_model(refwcs):\n \"\"\" Apply distortion model to WCS, including modifying\n CD with linear distortion terms\n\n \"\"\"\n # apply distortion model to CD matrix\n if 'ocx10' in refwcs.__dict__ and refwcs.ocx10 is not None:\n linmat = 
np.array([[refwcs.ocx11,refwcs.ocx10],[refwcs.ocy11,refwcs.ocy10]])/refwcs.idcscale\n refwcs.wcs.cd = np.dot(refwcs.wcs.cd,linmat)\n\n refwcs.wcs.set()\n refwcs.setOrient()\n refwcs.setPscale()"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef replace_model(refwcs, newcoeffs):\n print('WARNING:')\n print(' Replacing existing distortion model with one')\n print(' not necessarily matched to the observation!')\n # create linear version of WCS to be updated by new model\n wcslin = stwcs.distortion.utils.undistortWCS(refwcs)\n outwcs = refwcs.deepcopy()\n outwcs.wcs.cd = wcslin.wcs.cd\n outwcs.wcs.set()\n outwcs.setOrient()\n outwcs.setPscale()\n # add new model to updated WCS object\n add_model(outwcs,newcoeffs)\n # Update CD matrix with new model\n apply_model(outwcs)\n\n # replace original input WCS with newly updated WCS\n refwcs = outwcs.deepcopy()", "response": "Replace the distortion model in a current WCS with a new model"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ngenerate an undistorted HSTWCS object from an HSTWCS object with a distortion model", "response": "def undistortWCS(refwcs):\n \"\"\" Generate an undistorted HSTWCS from an HSTWCS object with a distortion model\n \"\"\"\n wcslin = stwcs.distortion.utils.output_wcs([refwcs])\n\n outwcs = stwcs.wcsutil.HSTWCS()\n outwcs.wcs = wcslin.wcs\n outwcs.wcs.set()\n outwcs.setPscale()\n outwcs.setOrient()\n outwcs.sip = None\n\n # Update instrument specific keywords\n outwcs.inst_kw = refwcs.inst_kw\n for kw in refwcs.inst_kw:\n outwcs.__dict__[kw] = refwcs.__dict__[kw]\n outwcs.pixel_shape = wcslin.pixel_shape\n\n return outwcs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ngenerate a headerlet based on the updated HSTWCS object and the template file.", "response": "def generate_headerlet(outwcs,template,wcsname,outname=None):\n \"\"\" Create a headerlet based on the 
updated HSTWCS object\n\n This function uses 'template' as the basis for the headerlet.\n This file can either be the original wcspars['refimage'] or\n wcspars['coeffsfile'], in this order of preference.\n\n If 'template' is None, then a simple Headerlet will be\n generated with a single SIPWCS extension and no distortion\n \"\"\"\n # Create header object from HSTWCS object\n siphdr = True\n if outwcs.sip is None:\n siphdr = False\n outwcs_hdr = outwcs.wcs2header(sip2hdr=siphdr)\n outwcs_hdr['NPIX1'] = outwcs.pixel_shape[0]\n outwcs_hdr['NPIX2'] = outwcs.pixel_shape[1]\n\n # create headerlet object in memory; either from a file or from scratch\n if template is not None and siphdr:\n print('Creating headerlet from template...')\n fname,extn = fileutil.parseFilename(template)\n extnum = fileutil.parseExtn(extn)\n extname = ('sipwcs',extnum[1])\n hdrlet = headerlet.createHeaderlet(fname,wcsname)\n # update hdrlet with header values from outwcs\n for kw in outwcs_hdr.items():\n hdrlet[extname].header[kw[0]] = kw[1]\n hdrlet[extname].header['WCSNAME'] = wcsname\n else:\n print('Creating headerlet from scratch...')\n hdrlet = fits.HDUList()\n hdrlet.append(fits.PrimaryHDU())\n siphdr = fits.ImageHDU(header=outwcs_hdr)\n siphdr.header['EXTNAME'] = 'SIPWCS'\n siphdr.header['WCSNAME'] = wcsname\n hdrlet.append(siphdr)\n\n # Write out header to a file as the final product\n if outname is not None:\n if outname.find('_hdr.fits') < 0:\n outname += '_hdr.fits'\n if os.path.exists(outname):\n print('Overwrite existing file \"%s\"'%outname)\n os.remove(outname)\n hdrlet.writeto(outname)\n print('Wrote out headerlet :',outname)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef qderiv(array): # TAKE THE ABSOLUTE DERIVATIVE OF A NUMARRY OBJECT\n\n #Create 2 empty arrays in memory of the same dimensions as 'array'\n tmpArray = np.zeros(array.shape,dtype=np.float64)\n outArray = np.zeros(array.shape, dtype=np.float64)\n\n # 
Get the length of an array side\n (naxis1,naxis2) = array.shape\n #print \"The input image size is (\",naxis1,\",\",naxis2,\").\"\n\n#Main derivate loop:\n #Shift images +/- 1 in Y.\n for y in range(-1,2,2):\n if y == -1:\n #shift input image 1 pixel right\n tmpArray[0:(naxis1-1),1:(naxis2-1)] = array[0:(naxis1-1),0:(naxis2-2)]\n #print \"Y shift = 1\"\n else:\n #shift input image 1 pixel left\n tmpArray[0:(naxis1-1),0:(naxis2-2)] = array[0:(naxis1-1),1:(naxis2-1)]\n #print \"Y shift = -1\"\n #print \"call _absoluteSubtract()\"\n (tmpArray,outArray) = _absoluteSubtract(array,tmpArray,outArray)\n\n #Shift images +/- 1 in X.\n for x in range(-1,2,2):\n if x == -1:\n #shift input image 1 pixel right\n tmpArray[1:(naxis1-1),0:(naxis2-1)] = array[0:(naxis1-2),0:(naxis2-1)]\n #print \"X shift = 1\"\n else:\n #shift input image 1 pixel left\n tmpArray[0:(naxis1-2),0:(naxis2-1)] = array[1:(naxis1-1),0:(naxis2-1)]\n #print \"X shift = -1\"\n #print \"call _absoluteSubtract()\"\n (tmpArray,outArray) = _absoluteSubtract(array,tmpArray,outArray)\n\n\n\n return outArray.astype(np.float32)", "response": "Take the absolute derivate of an image in memory."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef randomSelectFromCSV(tableName, numEntries, seedValue):\n\n # Initialize the random number generator\n seed(seedValue)\n\n # Get the contents of the table\n dataTable = Table.read(tableName, format='ascii.csv')\n numRows = len(dataTable)\n\n # Generate a sequence of integers the size of the table, and then\n # obtain a random subset of the sequence with no duplicate selections\n sequence = list(range(numRows))\n subset = sample(sequence, numEntries)\n\n # Extract the subset rows...\n outputTable = dataTable[subset]\n #outputTable = dataTable[0:numEntries]\n\n # Returns the outputTable which is an Astropy Table object\n return(outputTable)", "response": "Function to extract random entries from a master CSV file"} {"SOURCE": 
"codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_hstwcs(filename,hdulist,extnum):\n hdrwcs = wcsutil.HSTWCS(hdulist,ext=extnum)\n hdrwcs.filename = filename\n hdrwcs.expname = hdulist[extnum].header['expname']\n hdrwcs.extver = hdulist[extnum].header['extver']\n\n return hdrwcs", "response": "Return the HSTWCS object for a given chip."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef update_linCD(cdmat, delta_rot=0.0, delta_scale=1.0, cx=[0.0,1.0], cy=[1.0,0.0]):\n rotmat = fileutil.buildRotMatrix(delta_rot)*delta_scale\n new_lincd = np.dot(cdmat,rotmat)\n\n cxymat = np.array([[cx[1],cx[0]],[cy[1],cy[0]]])\n new_cd = np.dot(new_lincd,cxymat)\n\n return new_cd", "response": "Modify an existing linear CD matrix with rotation and scale and scale changes\n and return a distorted linear CD matrix."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a un - distorted CD matrix from the basic inputs.", "response": "def create_CD(orient, scale, cx=None, cy=None):\n \"\"\" Create a (un?)distorted CD matrix from the basic inputs.\n\n The 'cx' and 'cy' parameters, if given, provide the X and Y coefficients of\n the distortion as returned by reading the IDCTAB. 
Only the first 2 elements\n are used and should correspond to the 'OC[X/Y]10' and 'OC[X/Y]11' terms in that\n order as read from the expanded SIP headers.\n\n The units of 'scale' should be 'arcseconds/pixel' of the reference pixel.\n The value of 'orient' should be the absolute orientation on the sky of the\n reference pixel.\n\n \"\"\"\n cxymat = np.array([[cx[1],cx[0]],[cy[1],cy[0]]])\n rotmat = fileutil.buildRotMatrix(orient)*scale/3600.\n new_cd = np.dot(rotmat,cxymat)\n return new_cd"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef ddtohms(xsky,ysky,verbose=False,precision=6):\n xskyh = xsky /15.\n xskym = (xskyh - np.floor(xskyh)) * 60.\n xskys = (xskym - np.floor(xskym)) * 60.\n yskym = (np.abs(ysky) - np.floor(np.abs(ysky))) * 60.\n yskys = (yskym - np.floor(yskym)) * 60.\n\n fmt = \"%.\"+repr(precision)+\"f\"\n if isinstance(xskyh,np.ndarray):\n rah,dech = [],[]\n for i in range(len(xskyh)):\n rastr = repr(int(xskyh[i]))+':'+repr(int(xskym[i]))+':'+fmt%(xskys[i])\n decstr = repr(int(ysky[i]))+':'+repr(int(yskym[i]))+':'+fmt%(yskys[i])\n rah.append(rastr)\n dech.append(decstr)\n if verbose:\n print('RA = ',rastr,', Dec = ',decstr)\n else:\n rastr = repr(int(xskyh))+':'+repr(int(xskym))+':'+fmt%(xskys)\n decstr = repr(int(ysky))+':'+repr(int(yskym))+':'+fmt%(yskys)\n rah = rastr\n dech = decstr\n if verbose:\n print('RA = ',rastr,', Dec = ',decstr)\n\n return rah,dech", "response": "Convert sky position from decimal degrees to HMS format."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef make_outputwcs(imageObjectList, output, configObj=None, perfect=False):\n if not isinstance(imageObjectList,list):\n imageObjectList = [imageObjectList]\n\n # Compute default output WCS, replace later if user specifies a refimage\n hstwcs_list = []\n undistort=True\n for img in imageObjectList:\n chip_wcs = copy.deepcopy(img.getKeywordList('wcs'))\n # IF 
the user turned off use of coeffs (coeffs==False)\n if not configObj['coeffs']:\n for cw in chip_wcs:\n # Turn off distortion model for each input\n cw.sip = None\n cw.cpdis1 = None\n cw.cpdis2 = None\n cw.det2im = None\n undistort=False\n hstwcs_list += chip_wcs\n if not undistort and len(hstwcs_list) == 1:\n default_wcs = hstwcs_list[0].deepcopy()\n else:\n default_wcs = utils.output_wcs(hstwcs_list, undistort=undistort)\n\n if perfect:\n default_wcs.wcs.cd = make_perfect_cd(default_wcs)\n\n # Turn WCS instances into WCSObject instances\n outwcs = createWCSObject(output, default_wcs, imageObjectList)\n\n # Merge in user-specified attributes for the output WCS\n # as recorded in the input configObj object.\n final_pars = DEFAULT_WCS_PARS.copy()\n\n # More interpretation of the configObj needs to be done here to translate\n # the input parameter names to those understood by 'mergeWCS' as defined\n # by the DEFAULT_WCS_PARS dictionary.\n single_step = configObj[util.getSectionName(configObj, 3)]\n singleParDict = configObj[util.getSectionName(configObj, '3a')].copy()\n if single_step['driz_separate'] and singleParDict['driz_sep_wcs']:\n single_pars = DEFAULT_WCS_PARS.copy()\n del singleParDict['driz_sep_wcs']\n keyname = 'driz_sep_'\n for key in singleParDict:\n k = key[len(keyname):]\n if k != 'refimage':\n single_pars[k] = singleParDict[key]\n\n # Now, account for any user-specified reference image\n def_wcs = default_wcs.deepcopy()\n if singleParDict[keyname + 'refimage']:\n default_wcs = wcsutil.HSTWCS(singleParDict[keyname + 'refimage'])\n\n ### Create single_wcs instance based on user parameters\n outwcs.single_wcs = mergeWCS(default_wcs, single_pars)\n # restore global default WCS to original value so single_drizzle WCS does not\n # influence final_drizzle WCS\n default_wcs = def_wcs.deepcopy()\n\n final_step = configObj[util.getSectionName(configObj,7)]\n finalParDict = configObj[util.getSectionName(configObj,'7a')].copy()\n if final_step['driz_combine'] and 
finalParDict['final_wcs']:\n del finalParDict['final_wcs']\n keyname = 'final_'\n for key in finalParDict:\n k = key[len(keyname):]\n if k != 'refimage':\n final_pars[k] = finalParDict[key]\n\n # Now, account for any user-specified reference image\n if finalParDict[keyname + 'refimage']:\n rootname,extnum = fileutil.parseFilename(finalParDict[keyname+'refimage'])\n extnum = util.findWCSExtn(finalParDict[keyname+'refimage'])\n print('Creating OUTPUT WCS from {}[{}]'.format(rootname,extnum))\n default_wcs = wcsutil.HSTWCS('{}[{}]'.format(rootname,extnum))\n\n ### Create single_wcs instance based on user parameters\n outwcs.final_wcs = mergeWCS(default_wcs, final_pars)\n outwcs.wcs = outwcs.final_wcs.copy()\n\n # Apply user settings to create custom output_wcs instances\n # for each drizzle step\n updateImageWCS(imageObjectList, outwcs)\n\n return outwcs", "response": "Creates a new output WCS for the input imageList and outputs the output frame."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncreating a perfect CD matrix from the WCS.", "response": "def make_perfect_cd(wcs):\n \"\"\" Create a perfect (square, orthogonal, undistorted) CD matrix from the\n input WCS.\n \"\"\"\n def_scale = (wcs.pscale) / 3600.\n def_orientat = np.deg2rad(wcs.orientat)\n perfect_cd = def_scale * np.array(\n [[-np.cos(def_orientat),np.sin(def_orientat)],\n [np.sin(def_orientat),np.cos(def_orientat)]]\n )\n return perfect_cd"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef calcNewEdges(wcs, shape):\n\n naxis1 = shape[1]\n naxis2 = shape[0]\n # build up arrays for pixel positions for the edges\n # These arrays need to be: array([(x,y),(x1,y1),...])\n numpix = naxis1*2 + naxis2*2\n border = np.zeros(shape=(numpix,2),dtype=np.float64)\n\n # Now determine the appropriate values for this array\n # We also need to account for any subarray offsets\n xmin = 1.\n xmax = naxis1\n ymin = 1.\n ymax = naxis2\n\n # Build range 
of pixel values for each side\n # Add 1 to make them consistent with pixel numbering in IRAF\n # Also include the LTV offsets to represent position in full chip\n # since the model works relative to full chip positions.\n xside = np.arange(naxis1) + xmin\n yside = np.arange(naxis2) + ymin\n\n #Now apply them to the array to generate the appropriate tuples\n #bottom\n _range0 = 0\n _range1 = naxis1\n border[_range0:_range1,0] = xside\n border[_range0:_range1,1] = ymin\n #top\n _range0 = _range1\n _range1 = _range0 + naxis1\n border[_range0:_range1,0] = xside\n border[_range0:_range1,1] = ymax\n #left\n _range0 = _range1\n _range1 = _range0 + naxis2\n border[_range0:_range1,0] = xmin\n border[_range0:_range1,1] = yside\n #right\n _range0 = _range1\n _range1 = _range0 + naxis2\n border[_range0:_range1,0] = xmax\n border[_range0:_range1,1] = yside\n\n edges = wcs.all_pix2world(border[:,0],border[:,1],1)\n return edges", "response": "This method calculates the sky coordinates for all the pixels around the image and the edge of the image."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nconvert a PyWCS WCS object into a WCSObject instance.", "response": "def createWCSObject(output,default_wcs,imageObjectList):\n \"\"\"Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance.\"\"\"\n from . 
import imageObject\n outwcs = imageObject.WCSObject(output)\n outwcs.default_wcs = default_wcs\n outwcs.wcs = default_wcs.copy()\n outwcs.final_wcs = default_wcs.copy()\n outwcs.single_wcs = default_wcs.copy()\n\n outwcs.updateContextImage(imageObjectList[0].createContext)\n\n #\n # Add exptime information for use with drizzle\n #\n outwcs._exptime,outwcs._expstart,outwcs._expend = util.compute_texptime(imageObjectList)\n\n outwcs.nimages = util.countImages(imageObjectList)\n\n return outwcs"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nremoves all alternate WCS solutions from the header and removes all OPUS WCS Keywords from the header.", "response": "def removeAllAltWCS(hdulist,extlist):\n \"\"\"\n Removes all alternate WCS solutions from the header\n \"\"\"\n original_logging_level = log.level\n log.setLevel(logutil.logging.WARNING)\n\n try:\n hdr = hdulist[extlist[0]].header\n wkeys = altwcs.wcskeys(hdr)\n if ' ' in wkeys:\n wkeys.remove(' ')\n for extn in extlist:\n for wkey in wkeys:\n if wkey == 'O':\n continue\n altwcs.deleteWCS(hdulist,extn,wkey)\n\n # Forcibly remove OPUS WCS Keywords, since deleteWCS will not do it\n hwcs = readAltWCS(hdulist,extn,wcskey='O')\n\n if hwcs is None:\n continue\n\n for k in hwcs.keys():\n if k not in ['DATE-OBS','MJD-OBS'] and k in hdr:\n try:\n del hdr[k]\n except KeyError:\n pass\n except:\n raise\n\n finally:\n log.setLevel(original_logging_level)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nrestores WCS information to default values and update imageObject accordingly.", "response": "def restoreDefaultWCS(imageObjectList, output_wcs):\n \"\"\" Restore WCS information to default values, and update imageObject\n accordingly.\n \"\"\"\n if not isinstance(imageObjectList,list):\n imageObjectList = [imageObjectList]\n\n output_wcs.restoreWCS()\n\n updateImageWCS(imageObjectList, output_wcs)"} {"SOURCE": "codesearchnet", 
"instruction": "Can you generate the documentation for the following Python 3 function\ndef _py2round(x):\n if hasattr(x, '__iter__'):\n rx = np.empty_like(x)\n m = x >= 0.0\n rx[m] = np.floor(x[m] + 0.5)\n m = np.logical_not(m)\n rx[m] = np.ceil(x[m] - 0.5)\n return rx\n\n else:\n if x >= 0.0:\n return np.floor(x + 0.5)\n else:\n return np.ceil(x - 0.5)", "response": "This function returns a rounded up value of the argument similar to Python 2."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef mergeWCS(default_wcs, user_pars):\n #\n # Start by making a copy of the input WCS...\n #\n outwcs = default_wcs.deepcopy()\n\n # If there are no user set parameters, just return a copy of\n # the original WCS:\n if all([upar is None for upar in user_pars.values()]):\n return outwcs\n\n if _check_custom_WCS_pars('ra', 'dec', user_pars):\n _crval = (user_pars['ra'], user_pars['dec'])\n else:\n _crval = None\n\n if ('scale' in user_pars and user_pars['scale'] is not None and\n not _check_close_scale(user_pars['scale'], outwcs.pscale)):\n _scale = user_pars['scale']\n _ratio = outwcs.pscale / _scale\n else:\n _ratio = None\n _scale = None\n\n if ('rot' not in user_pars) or user_pars['rot'] is None:\n _delta_rot = None\n else:\n _delta_rot = outwcs.orientat - user_pars['rot']\n if _delta_rot == 0.0:\n _delta_rot = None\n\n if _check_custom_WCS_pars('crpix1', 'crpix2', user_pars):\n _crpix = (user_pars['crpix1'], user_pars['crpix2'])\n else:\n _crpix = None\n\n shape = None\n\n if _check_custom_WCS_pars('outnx', 'outny', user_pars):\n shape = (\n int(_py2round(user_pars['outnx'])),\n int(_py2round(user_pars['outny']))\n )\n\n if shape[0] < 1 or shape[1] < 1:\n raise ValueError(\"Custom WCS output image size smaller than 1\")\n\n if _crpix is None:\n # make sure new image is centered on the CRPIX of the old WCS:\n _crpix = ((shape[0] + 1.0) / 2.0, (shape[1] + 1.0) / 2.0)\n\n else:\n naxis1, naxis2 = outwcs.pixel_shape\n if 
_delta_rot is None:\n # no rotation is involved\n\n if _ratio is not None:\n # apply scale only:\n\n # compute output image shape:\n shape = (\n max(1, int(_py2round(_ratio * naxis1))),\n max(1, int(_py2round(_ratio * naxis2)))\n )\n\n # update CRPIX:\n if _crpix is None:\n _crpix = 1.0 + _ratio * (outwcs.wcs.crpix - 1.0)\n\n else:\n _corners = np.array(\n [[0.5, 0.5],\n [naxis1 + 0.5, 0.5],\n [0.5, naxis2 + 0.5],\n [naxis1 + 0.5, naxis2 + 0.5]]\n ) - outwcs.wcs.crpix\n\n if _ratio is not None:\n # scale corners:\n _corners *= _ratio\n\n # rotate corners and find new image range:\n ((_xmin, _xmax), (_ymin, _ymax)) = util.getRotatedSize(_corners,\n _delta_rot)\n\n # compute output image shape:\n # NOTE: _py2round may be replaced with np.ceil\n shape = (\n max(1, int(_py2round(_xmax - _xmin))),\n max(1, int(_py2round(_ymax - _ymin)))\n )\n\n if _crpix is None:\n # update CRPIX:\n _crpix = (-_xmin + 0.5, -_ymin + 0.5)\n\n # Set up the new WCS based on values from old one:\n\n if _ratio is not None:\n # Update plate scale\n outwcs.wcs.cd = outwcs.wcs.cd / _ratio\n outwcs.pscale = _scale\n\n # update orientation\n if _delta_rot is not None:\n outwcs.wcs.cd = _rotateCD(outwcs.wcs.cd, _delta_rot)\n outwcs.orientat -= _delta_rot\n\n if shape is not None:\n # update size:\n outwcs.pixel_shape = shape\n\n # update reference position\n if _crpix is not None:\n outwcs.wcs.crpix = np.array(_crpix, dtype=np.float64)\n\n if _crval is not None:\n outwcs.wcs.crval = np.array(_crval, dtype=np.float64)\n\n return outwcs", "response": "Returns a copy of the input WCS with the user specified values given as dictionary derived from the input configObj object and the output PyWCS object computed from distortion. 
output_wcs."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef convertWCS(inwcs,drizwcs):\n drizwcs[0] = inwcs.crpix[0]\n drizwcs[1] = inwcs.crval[0]\n drizwcs[2] = inwcs.crpix[1]\n drizwcs[3] = inwcs.crval[1]\n drizwcs[4] = inwcs.cd[0][0]\n drizwcs[5] = inwcs.cd[1][0]\n drizwcs[6] = inwcs.cd[0][1]\n drizwcs[7] = inwcs.cd[1][1]\n\n return drizwcs", "response": "Copy WCSObject WCS into Drizzle compatible array."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncopy output WCS array from Drizzle into WCSObject.", "response": "def updateWCS(drizwcs,inwcs):\n \"\"\" Copy output WCS array from Drizzle into WCSObject.\"\"\"\n crpix = np.array([drizwcs[0],drizwcs[2]], dtype=np.float64)\n crval = np.array([drizwcs[1],drizwcs[3]], dtype=np.float64)\n cd = np.array([[drizwcs[4],drizwcs[6]],[drizwcs[5],drizwcs[7]]], dtype=np.float64)\n inwcs.cd = cd\n inwcs.crval = crval\n inwc.crpix = crpix\n inwcs.pscale = N.sqrt(N.power(inwcs.cd[0][0],2)+N.power(inwcs.cd[1][0],2)) * 3600.\n inwcs.orient = N.arctan2(inwcs.cd[0][1],inwcs.cd[1][1]) * 180./N.pi"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef wcsfit(img_wcs, ref_wcs):\n # Define objects that we need to use for the fit...\n #in_refpix = img_geom.model.refpix\n wmap = WCSMap(img_wcs,ref_wcs)\n cx, cy = coeff_converter.sip2idc(img_wcs)\n # Convert the RA/Dec positions back to X/Y in output product image\n #_cpix_xyref = np.zeros((4,2),dtype=np.float64)\n\n # Start by setting up an array of points +/-0.5 pixels around CRVAL1,2\n # However, we must shift these positions by 1.0pix to match what\n # drizzle will use as its reference position for 'align=center'.\n _cpix = (img_wcs.wcs.crpix[0],img_wcs.wcs.crpix[1])\n _cpix_arr = np.array([_cpix,(_cpix[0],_cpix[1]+1.),\n (_cpix[0]+1.,_cpix[1]+1.),(_cpix[0]+1.,_cpix[1])], dtype=np.float64)\n # Convert these positions to RA/Dec\n 
_cpix_rd = wmap.xy2rd(img_wcs,_cpix_arr[:,0],_cpix_arr[:,1])\n #for pix in xrange(len(_cpix_rd[0])):\n _cpix_xref,_cpix_yref = wmap.rd2xy(ref_wcs,_cpix_rd[0],_cpix_rd[1])\n _cpix_xyref = np.zeros((4,2),dtype=np.float64)\n _cpix_xyref[:,0] = _cpix_xref\n _cpix_xyref[:,1] = _cpix_yref\n\n \"\"\"\n # needed to handle correctly subarrays and wfpc2 data\n if img_wcs.delta_refx == 0.0 and img_wcs.delta_refy == 0.0:\n offx, offy = (0.0,0.0)\n else:\n offx, offy = (1.0, 1.0)\n \"\"\"\n offx, offy = (0.0,0.0)\n\n # Now, apply distortion model to input image XY positions\n #_cpix_xyc = np.zeros((4,2),dtype=np.float64)\n _cpix_xyc = utils.apply_idc(_cpix_arr, cx, cy, img_wcs.wcs.crpix, img_wcs.pscale, order=1)\n\n # Need to get the XDELTA,YDELTA values included here in order to get this\n # to work with MDTng.\n #if in_refpix:\n # _cpix_xyc += (in_refpix['XDELTA'], in_refpix['YDELTA'])\n\n # Perform a fit between:\n # - undistorted, input positions: _cpix_xyc\n # - X/Y positions in reference frame: _cpix_xyref\n abxt,cdyt = fitlin(_cpix_xyc,_cpix_xyref)\n\n # This correction affects the final fit when you are fitting\n # a WCS to itself (no distortion coeffs), so it needs to be\n # taken out in the coeffs file by modifying the zero-point value.\n # WJH 17-Mar-2005\n abxt[2] -= ref_wcs.wcs.crpix[0] + offx\n cdyt[2] -= ref_wcs.wcs.crpix[1] + offy\n\n return abxt,cdyt", "response": "This function computes a linear fit between 2 WCS objects and returns a new object for the image and ref_wcs."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncompute the least - squares fit between two arrays.", "response": "def fitlin(imgarr,refarr):\n \"\"\" Compute the least-squares fit between two arrays.\n A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).\n \"\"\"\n # Initialize variables\n _mat = np.zeros((3,3),dtype=np.float64)\n _xorg = imgarr[0][0]\n _yorg = imgarr[0][1]\n _xoorg = refarr[0][0]\n _yoorg = refarr[0][1]\n _sigxox = 0.\n _sigxoy = 0.\n 
_sigxo = 0.\n _sigyox = 0.\n _sigyoy = 0.\n _sigyo = 0.\n\n _npos = len(imgarr)\n # Populate matrices\n for i in range(_npos):\n _mat[0][0] += np.power((imgarr[i][0] - _xorg),2)\n _mat[0][1] += (imgarr[i][0] - _xorg) * (imgarr[i][1] - _yorg)\n _mat[0][2] += (imgarr[i][0] - _xorg)\n _mat[1][1] += np.power((imgarr[i][1] - _yorg),2)\n _mat[1][2] += imgarr[i][1] - _yorg\n\n _sigxox += (refarr[i][0] - _xoorg)*(imgarr[i][0] - _xorg)\n _sigxoy += (refarr[i][0] - _xoorg)*(imgarr[i][1] - _yorg)\n _sigxo += refarr[i][0] - _xoorg\n _sigyox += (refarr[i][1] - _yoorg)*(imgarr[i][0] -_xorg)\n _sigyoy += (refarr[i][1] - _yoorg)*(imgarr[i][1] - _yorg)\n _sigyo += refarr[i][1] - _yoorg\n\n _mat[2][2] = _npos\n _mat[1][0] = _mat[0][1]\n _mat[2][0] = _mat[0][2]\n _mat[2][1] = _mat[1][2]\n\n # Now invert this matrix\n _mat = linalg.inv(_mat)\n\n _a = _sigxox*_mat[0][0]+_sigxoy*_mat[0][1]+_sigxo*_mat[0][2]\n _b = -1*(_sigxox*_mat[1][0]+_sigxoy*_mat[1][1]+_sigxo*_mat[1][2])\n #_x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2]\n\n _c = _sigyox*_mat[1][0]+_sigyoy*_mat[1][1]+_sigyo*_mat[1][2]\n _d = _sigyox*_mat[0][0]+_sigyoy*_mat[0][1]+_sigyo*_mat[0][2]\n #_y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2]\n\n _xt = _xoorg - _a*_xorg+_b*_yorg\n _yt = _yoorg - _d*_xorg-_c*_yorg\n\n return [_a,_b,_xt],[_c,_d,_yt]"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nperforming a linear, orthogonal fit between matched lists of positions 'xy' (input) and 'uv' (output). 
Output: (same as for fit_arrays_general)", "response": "def fitlin_rscale(xy,uv,verbose=False):\n \"\"\" Performs a linear, orthogonal fit between matched\n lists of positions 'xy' (input) and 'uv' (output).\n\n Output: (same as for fit_arrays_general)\n \"\"\"\n mu = uv[:,0].mean()\n mv = uv[:,1].mean()\n mx = xy[:,0].mean()\n my = xy[:,1].mean()\n\n u = uv[:,0] - mu\n v = uv[:,1] - mv\n x = xy[:,0] - mx\n y = xy[:,1] - my\n\n Sxx = np.dot(x,x)\n Syy = np.dot(y,y)\n Sux = np.dot(u,x)\n Suy = np.dot(u,y)\n Svx = np.dot(v,x)\n Svy = np.dot(v,y)\n\n # implement parity check\n if (np.dot(Sux,Svy) > 0):\n p = 1\n else:\n p = -1\n\n XX = p*Sux + Svy\n YY = Suy - p*Svx\n\n # derive output values\n theta_deg = np.rad2deg(np.arctan2(YY,XX))% 360.0\n scale = np.sqrt(XX**2 + YY**2) / (Sxx+Syy)\n shift = (mu-mx,mv-my)\n if verbose:\n print('Linear RSCALE fit: rotation = ',theta_deg,' scale = ',scale,' offset = ',shift)\n coeffs = scale * fileutil.buildRotMatrix(-theta_deg)\n\n P = [coeffs[0,0],coeffs[0,1],shift[0]]\n Q = [coeffs[1,1],coeffs[1,0],shift[1]]\n return P,Q"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef fitlin_clipped(xy,uv,verbose=False,mode='rscale',nclip=3,reject=3):\n fitting_funcs = {'rscale':fitlin_rscale,'general':fitlin}\n # Get the fitting function to be used\n fit_func = fitting_funcs[mode.lower()]\n # Perform the initial fit\n P,Q = fit_func(xy,uv)\n xyc = apply_fitlin(xy,P,Q)\n\n # compute residuals from fit for input positions\n dx = uv[:,0] - xyc[0]\n dy = uv[:,1] - xyc[1]\n fit_rms = [dx.std(),dy.std()]\n\n if nclip > 0:\n data = xy.copy()\n outdata = uv.copy()\n\n numclipped = 0\n for i in range(nclip):\n iterclipped = 0\n xyc = apply_fitlin(data,P,Q)\n\n # compute residuals from fit for input positions\n dx = outdata[:,0] - xyc[0]\n dy = outdata[:,1] - xyc[1]\n\n # find indices of outliers in x and y\n xout = np.where(np.abs(dx - dx.mean()) > reject*dx.std())\n yout = 
np.where(np.abs(dy - dy.mean()) > reject*dy.std())\n # concatenate those indices and sort them\n outliers_indx = xout[0].tolist()+yout[0].tolist()\n outliers_indx.sort()\n # define the full range of indices for the data points left\n full_indx = list(range(data.shape[0]))\n # remove all unique indices specified in outliers from full range\n for o in outliers_indx:\n # only remove if it has not been removed already\n # accounts for the same point being an outlier in both x and y\n if full_indx.count(o) > 0:\n full_indx.remove(o)\n iterclipped += 1\n\n if iterclipped == 0:\n break\n\n numclipped += iterclipped\n if verbose:\n print('Removed a total of ',numclipped,' points through iteration ',i+1)\n # create clipped data\n data_iter = np.zeros([len(full_indx),2],dtype=data.dtype)\n if verbose:\n print('Iter #',i+1,' data:',data.shape,data_iter.shape,len(full_indx))\n\n data_iter[:,0] = data[:,0][full_indx]\n data_iter[:,1] = data[:,1][full_indx]\n outdata_iter = np.zeros([len(full_indx),2],dtype=data.dtype)\n outdata_iter[:,0] = outdata[:,0][full_indx]\n outdata_iter[:,1] = outdata[:,1][full_indx]\n\n # perform the fit again with the clipped data and go to the next iteration\n data = data_iter\n outdata = outdata_iter\n P,Q = fit_func(data,outdata)\n # compute residuals from fit for input positions\n xyc = apply_fitlin(data,P,Q)\n dx = outdata[:,0] - xyc[0]\n dy = outdata[:,1] - xyc[1]\n fit_rms = [dx.std(),dy.std()]\n\n if verbose:\n print('Fit clipped ',numclipped,' points over ',nclip,' iterations.')\n return P,Q,fit_rms", "response": "Fit a clipped version of the fitlin function to the input data and return the result of the fitlin function."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreads in an alternate primary WCS from a file.", "response": "def readAltWCS(fobj, ext, wcskey=' ', verbose=False):\n \"\"\"\n Reads in alternate primary WCS from specified extension.\n\n Parameters\n ----------\n fobj : str, 
`astropy.io.fits.HDUList`\n fits filename or fits file object\n containing alternate/primary WCS(s) to be converted\n wcskey : str\n [\" \",A-Z]\n alternate/primary WCS key that will be replaced by the new key\n ext : int\n fits extension number\n Returns\n -------\n hdr: fits.Header\n header object with ONLY the keywords for specified alternate WCS\n \"\"\"\n if isinstance(fobj, str):\n fobj = fits.open(fobj, memmap=False)\n\n hdr = altwcs._getheader(fobj, ext)\n try:\n original_logging_level = log.level\n log.setLevel(logutil.logging.WARNING)\n nwcs = pywcs.WCS(hdr, fobj=fobj, key=wcskey)\n\n except KeyError:\n if verbose:\n print('readAltWCS: Could not read WCS with key %s' % wcskey)\n print(' Skipping %s[%s]' % (fobj.filename(), str(ext)))\n return None\n\n finally:\n log.setLevel(original_logging_level) # restore original logging level\n\n hwcs = nwcs.to_header()\n\n if nwcs.wcs.has_cd():\n hwcs = altwcs.pc2cd(hwcs, key=wcskey)\n return hwcs"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef forward(self,pixx,pixy):\n # This matches WTRAXY results to better than 1e-4 pixels.\n skyx,skyy = self.input.all_pix2world(pixx,pixy,self.origin)\n result= self.output.wcs_world2pix(skyx,skyy,self.origin)\n return result", "response": "Transform the input pixx pixy positions in the input frame into the output frame."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef backward(self,pixx,pixy):\n skyx,skyy = self.output.wcs_pix2world(pixx,pixy,self.origin)\n result = self.input.all_world2pix(skyx,skyy,self.origin)\n return result", "response": "Transform pixx pixy positions from the output frame back onto their\n original positions in the input frame."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nconstructing a filename for the given signature.", "response": "def constructFilename(signature):\n \"\"\"Construct an output filename 
for the given signature::\n\n signature=[instr+detector,(nx,ny),detnum]\n\n The signature is in the image object.\n \"\"\"\n suffix = buildSignatureKey(signature)\n filename = os.path.join('.', suffix)\n return filename"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef addMember(self, imagePtr=None):\n numchips=imagePtr._numchips\n log.info(\"Computing static mask:\\n\")\n\n chips = imagePtr.group\n if chips is None:\n chips = imagePtr.getExtensions()\n\n #for chip in range(1,numchips+1,1):\n for chip in chips:\n chipid=imagePtr.scienceExt + ','+ str(chip)\n chipimage=imagePtr.getData(chipid)\n signature=imagePtr[chipid].signature\n\n # If this is a new signature, create a new Static Mask file which is empty\n # only create a new mask if one doesn't already exist\n if ((signature not in self.masklist) or (len(self.masklist) == 0)):\n self.masklist[signature] = self._buildMaskArray(signature)\n maskname = constructFilename(signature)\n self.masknames[signature] = maskname\n else:\n chip_sig = buildSignatureKey(signature)\n for s in self.masknames:\n if chip_sig in self.masknames[s]:\n maskname = self.masknames[s]\n break\n imagePtr[chipid].outputNames['staticMask'] = maskname\n\n stats = ImageStats(chipimage,nclip=3,fields='mode')\n mode = stats.mode\n rms = stats.stddev\n nbins = len(stats.histogram)\n del stats\n\n log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' %\n (mode, rms, self.static_sig))\n\n if nbins >= 2: # only combine data from new image if enough data to mask\n sky_rms_diff = mode - (self.static_sig*rms)\n np.bitwise_and(self.masklist[signature],\n np.logical_not(np.less(chipimage, sky_rms_diff)),\n self.masklist[signature])\n del chipimage", "response": "Adds a member to the static mask list."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef getMaskArray(self, signature):\n if signature in self.masklist:\n mask = 
self.masklist[signature]\n else:\n mask = None\n return mask", "response": "Returns the appropriate StaticMask array for the image."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the name of the output mask file that contains the given signature.", "response": "def getFilename(self,signature):\n \"\"\"Returns the name of the output mask file that\n should reside on disk for the given signature. \"\"\"\n\n filename=constructFilename(signature)\n\n if(fileutil.checkFileExists(filename)):\n return filename\n else:\n print(\"\\nmMask file for \", str(signature), \" does not exist on disk\", file=sys.stderr)\n return None"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ndeleting all static mask objects.", "response": "def close(self):\n \"\"\" Deletes all static mask objects. \"\"\"\n\n for key in self.masklist.keys():\n self.masklist[key] = None\n self.masklist = {}"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ndeletes just the mask that matches the signature given.", "response": "def deleteMask(self,signature):\n \"\"\" Delete just the mask that matches the signature given.\"\"\"\n if signature in self.masklist:\n self.masklist[signature] = None\n else:\n log.warning(\"No matching mask\")"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nsave the static mask to a file.", "response": "def saveToFile(self,imageObjectList):\n \"\"\" Saves the static mask to a file\n it uses the signatures associated with each\n mask to contruct the filename for the output mask image.\n \"\"\"\n virtual = imageObjectList[0].inmemory\n\n for key in self.masklist.keys():\n #check to see if the file already exists on disk\n filename = self.masknames[key]\n #create a new fits image with the mask array and a standard header\n #open a new header and data unit\n newHDU = fits.PrimaryHDU()\n newHDU.data = self.masklist[key]\n\n if 
virtual:\n for img in imageObjectList:\n img.saveVirtualOutputs({filename:newHDU})\n\n else:\n try:\n newHDU.writeto(filename, overwrite=True)\n log.info(\"Saving static mask to disk: %s\" % filename)\n\n except IOError:\n log.error(\"Problem saving static mask file: %s to \"\n \"disk!\\n\" % filename)\n raise IOError"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nexpands image from original shape to requested shape.", "response": "def expand_image(image, shape):\n \"\"\" Expand image from original shape to requested shape. Output shape\n must be an integer multiple of input image shape for each axis. \"\"\"\n if (shape[0] % image.shape[0]) or (shape[1] % image.shape[1]):\n raise ValueError(\"Output shape must be an integer multiple of input \"\n \"image shape.\")\n sx = shape[1] // image.shape[1]\n sy = shape[0] // image.shape[0]\n\n ox = (sx - 1.0) / (2.0 * sx)\n oy = (sy - 1.0) / (2.0 * sy)\n\n # generate output coordinates:\n y, x = np.indices(shape, dtype=np.float)\n x = x / sx - ox\n y = y / sy - oy\n\n # interpolate:\n return bilinear_interp(image, x, y)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ninterpolate input ``data`` at \"pixel\" coordinates ``x`` and ``y``.", "response": "def bilinear_interp(data, x, y):\n \"\"\" Interpolate input ``data`` at \"pixel\" coordinates ``x`` and ``y``. 
\"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n if x.shape != y.shape:\n raise ValueError(\"X- and Y-coordinates must have identical shapes.\")\n\n out_shape = x.shape\n out_size = x.size\n x = x.ravel()\n y = y.ravel()\n\n x0 = np.empty(out_size, dtype=np.int)\n y0 = np.empty(out_size, dtype=np.int)\n np.clip(x, 0, data.shape[1] - 2, out=x0)\n np.clip(y, 0, data.shape[0] - 2, out=y0)\n x1 = x0 + 1\n y1 = y0 + 1\n\n f00 = data[(y0, x0)]\n f10 = data[(y1, x0)]\n f01 = data[(y0, x1)]\n f11 = data[(y1, x1)]\n\n w00 = (x1 - x) * (y1 - y)\n w10 = (x1 - x) * (y - y0)\n w01 = (x - x0) * (y1 - y)\n w11 = (x - x0) * (y - y0)\n\n interp = w00 * f00 + w10 * f10 + w01 * f01 + w11 * f11\n\n return interp.reshape(out_shape).astype(data.dtype.type)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _assignSignature(self, chip):\n sci_chip = self._image[self.scienceExt,chip]\n ny=sci_chip._naxis1\n nx=sci_chip._naxis2\n detnum = sci_chip.detnum\n instr=self._instrument\n\n sig=(instr+self._detector,(nx,ny),int(detnum)) #signature is a tuple\n sci_chip.signature=sig", "response": "Assign a unique signature for the image based\n on the instrument detector chip and size\n "} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef getReadNoise(self):\n if self.proc_unit == 'native':\n return self._rdnoise / self._gain()\n return self._rdnoise", "response": "Method for returning the read noise of a specific detector class."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef setInstrumentParameters(self, instrpars):\n pri_header = self._image[0].header\n\n if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):\n instrpars['gnkeyword'] = 'ATODGAIN'\n if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):\n instrpars['rnkeyword'] = 'READNSE'\n if self._isNotValid 
(instrpars['exptime'], instrpars['expkeyword']):\n instrpars['expkeyword'] = 'EXPTIME'\n\n for chip in self.returnAllChips(extname=self.scienceExt):\n\n chip._gain = self.getInstrParameter(instrpars['gain'], pri_header,\n instrpars['gnkeyword'])\n chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header,\n instrpars['rnkeyword'])\n chip._exptime = self.getInstrParameter(instrpars['exptime'], chip.header,\n instrpars['expkeyword'])\n\n if chip._gain is None or chip._rdnoise is None or chip._exptime is None:\n print('ERROR: invalid instrument task parameter')\n raise ValueError\n\n chip._effGain = chip._gain\n self._assignSignature(chip._chip) #this is used in the static mask\n\n self.doUnitConversions()", "response": "This method is used to set the default values into the parameter dictionary. It is used by the base class to set the default values for the instrument task."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nconverts the data to electrons.", "response": "def doUnitConversions(self):\n \"\"\"Convert the data to electrons.\n\n This converts all science data extensions and saves\n the results back to disk. 
We need to make sure\n the data inside the chips already in memory is altered as well.\n\n \"\"\"\n for det in range(1,self._numchips+1,1):\n\n chip=self._image[self.scienceExt,det]\n\n conversionFactor = self.effGain\n chip._gain = self.effGain #1.\n chip.effGain = self.effGain\n chip._conversionFactor = conversionFactor"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nanalyzing the data of the input file and returns a Astropy Table object containing the data that can be processed.", "response": "def analyze_data(inputFileList, **kwargs):\n \"\"\"\n Determine if images within the dataset can be aligned\n\n Parameters\n ==========\n inputFileList: list\n List containing FLT and/or FLC filenames for all input images which comprise an associated\n dataset where 'associated dataset' may be a single image, multiple images, an HST association,\n or a number of HST associations\n\n Returns\n =======\n outputTable: object\n Astropy Table object containing data pertaining to the associated dataset, including\n the doProcess bool. 
It is intended this table is updated by subsequent functions for\n bookkeeping purposes.\n\n Notes\n =====\n The keyword/value pairs below define the \"cannot process categories\".\n OBSTYPE : is not IMAGING\n MTFLAG : T\n SCAN-TYP : C or D (or !N)\n FILTER : G*, *POL*, *PRISM*\n FILTER1 : G*, *POL*, *PRISM*\n FILTER2 : G*, *POL*, *PRISM*\n APERTURE : *GRISM*, G*-REF, RAMP, *POL*, *PRISM*\n TARGNAME : DARK, TUNGSTEN, BIAS, FLAT, EARTH-CALIB, DEUTERIUM\n EXPTIME : 0\n CHINJECT : is not NONE\n\n The keyword/value pairs below define the category which the data can be processed, but\n the results may be compromised\n FGSLOCK : FINE/GYRO, FINE/GY, COARSE, GYROS\n\n FITS Keywords only for WFC3 data: SCAN_TYP, FILTER, and CHINJECT (UVIS)\n FITS Keywords only for ACS data: FILTER1 and FILTER2\n\n Please be aware of the FITS keyword value NONE vs the Python None.\n FIX: improve robustness when analyzing filter and aperture names, possibly use PHOTMODE instead\n \"\"\"\n OBSKEY = 'OBSTYPE'\n MTKEY = 'MTFLAG'\n SCNKEY = 'SCAN_TYP'\n FILKEY = 'FILTER'\n FILKEY1 = 'FILTER1'\n FILKEY2 = 'FILTER2'\n APKEY = 'APERTURE'\n TARKEY = 'TARGNAME'\n EXPKEY = 'EXPTIME'\n FGSKEY = 'FGSLOCK'\n CHINKEY = 'CHINJECT'\n\n acsFiltNameList = [FILKEY1, FILKEY2]\n\n catalog = None # Astrometric catalog used for alignment\n catalogSources = 0 # Number of astrometric catalog sources determined based upon coordinate overlap with image WCS\n foundSources = 0 # Number of sources detected in images\n matchSources = 0 # Number of sources cross matched between astrometric catalog and detected in image\n offset_x = None\n offset_y = None\n rot = None\n scale = None\n rms_x = -1.0\n rms_y = -1.0\n rms_ra = -1.0\n rms_dec = -1.0\n chisq_x = -1.0\n chisq_y = -1.0\n completed = False # If true, there was no exception and the processing completed all logic\n dateObs = None # Human readable date\n mjdutc = -1.0 # MJD UTC start of exposure\n fgslock = None\n processMsg = None\n status = 9999\n compromised = 0\n 
headerletFile = None\n fit_qual = -1\n\n fit_rms = -1.0\n total_rms = -1.0\n datasetKey = -1.0\n\n namesArray = ('imageName', 'instrument', 'detector', 'filter', 'aperture', 'obstype',\n 'subarray', 'dateObs', 'mjdutc', 'doProcess', 'processMsg', 'catalog', 'foundSources',\n 'catalogSources','matchSources',\n 'offset_x', 'offset_y', 'rotation','scale',\n 'rms_x', 'rms_y', 'rms_ra', 'rms_dec', 'completed',\n 'fit_rms', 'total_rms', 'datasetKey', 'status', 'fit_qual', 'headerletFile')\n dataType = ('S20', 'S20', 'S20', 'S20', 'S20', 'S20', 'b', 'S20', 'f8', 'b', 'S30',\n 'S20', 'i4', 'i4', 'i4', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8',\n 'b', 'f8', 'f8', 'i8', 'i4', 'i4', 'S30')\n\n # Create an astropy table\n outputTable = Table(names=namesArray,dtype=dataType)\n\n # Loop over the list of images to determine viability for alignment processing\n #\n # Capture the data characteristics before any evaluation so the information is\n # available for the output table regardless of which keyword is used to\n # to determine the data is not viable for alignment.\n\n for inputFile in inputFileList:\n\n header_hdu = 0\n header_data = getheader(inputFile, header_hdu)\n\n # Keywords to use potentially for downstream analysis\n instrume = (header_data['INSTRUME']).upper()\n detector = (header_data['DETECTOR']).upper()\n subarray = header_data['SUBARRAY']\n dateObs = header_data['DATE-OBS']\n mjdutc = header_data['EXPSTART']\n\n # Obtain keyword values for analysis of viability\n obstype = (header_data[OBSKEY]).upper()\n mtflag = (header_data[MTKEY]).upper()\n scan_typ = ''\n if instrume == 'WFC3':\n scan_typ = (header_data[SCNKEY]).upper()\n\n sfilter = ''\n if instrume == 'WFC3':\n sfilter = (header_data[FILKEY]).upper()\n # Concatenate the two ACS filter names together with an underscore\n # If the filter name is blank, skip it\n if instrume == 'ACS':\n for filtname in acsFiltNameList:\n\n # The filter keyword value could be zero or more blank spaces\n # Strip off any 
leading or trailing blanks\n if len(header_data[filtname].upper().strip()) > 0:\n\n # If the current filter variable already has some content,\n # need to append an underscore before adding more text\n if len(sfilter) > 0:\n sfilter += '_'\n sfilter += header_data[filtname].upper().strip()\n\n aperture = (header_data[APKEY]).upper()\n targname = (header_data[TARKEY]).upper()\n exptime = header_data[EXPKEY]\n fgslock = (header_data[FGSKEY]).upper()\n\n chinject = 'NONE'\n if instrume == 'WFC3' and detector == 'UVIS':\n chinject = (header_data[CHINKEY]).upper()\n\n # Determine if the image has one of these conditions. The routine\n # will exit processing upon the first satisfied condition.\n\n noProcKey = None\n noProcValue = None\n doProcess = True\n # Imaging vs spectroscopic or coronagraphic\n if obstype != 'IMAGING':\n noProcKey = OBSKEY\n noProcValue = obstype\n\n # Moving target\n elif mtflag == 'T':\n noProcKey = MTKEY\n noProcValue = mtflag\n\n # Bostrophidon without or with dwell (WFC3 only)\n elif any ([scan_typ == 'C', scan_typ == 'D']):\n noProcKey = SCNKEY\n noProcValue = scan_typ\n\n # Ramp, polarizer, grism, or prism\n elif any (x in aperture for x in ['RAMP', 'POL', 'GRISM', '-REF', 'PRISM']):\n noProcKey = APKEY\n noProcValue = aperture\n\n # Calibration target\n elif any (x in targname for x in ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL']):\n noProcKey = TARKEY\n noProcValue = targname\n\n # Exposure time of effectively zero\n elif math.isclose(exptime, 0.0, abs_tol=1e-5):\n noProcKey = EXPKEY\n noProcValue = exptime\n\n # Commanded FGS lock\n elif any (x in fgslock for x in ['GY', 'COARSE']):\n noProcKey = FGSKEY\n noProcValue = fgslock\n\n # Charge injection mode\n elif chinject != 'NONE':\n noProcKey = CHINKEY\n noProcValue = chinject\n\n # Filter which does not begin with: 'F'(F###), 'C'(CLEAR), 'N'(N/A), and is not blank\n # The sfilter variable may be the concatenation of two filters (F160_CLEAR)\n elif sfilter and not 
sfilter.startswith(('F', 'C', 'N')):\n noProcKey = FILKEY\n noProcValue = sfilter\n\n elif '_' in sfilter:\n pos = sfilter.index('_')\n pos += 1\n\n if sfilter[pos] != 'F' and sfilter[pos] != '' and sfilter[pos] != 'C' and sfilter[pos] != 'N':\n noProcKey = FILKEY\n noProcValue = sfilter\n\n # If noProcKey is set to a keyword, then this image has been found to not be viable for\n # alignment purposes.\n if (noProcKey is not None):\n if (noProcKey != FGSKEY):\n doProcess = False\n msgType = Messages.NOPROC.value\n else:\n msgType = Messages.WARN.value\n\n processMsg = noProcKey + '=' + str(noProcValue)\n\n # Issue message to log file for this data indicating no processing to be done or\n # processing should be allowed, but there may be some issue with the result (e.g.,\n # GYROS mode so some drift)\n generate_msg(inputFile, msgType, noProcKey, noProcValue)\n\n # Populate a row of the table\n outputTable.add_row([inputFile, instrume, detector, sfilter, aperture, obstype,\n subarray, dateObs, mjdutc, doProcess, processMsg, catalog,\n foundSources, catalogSources, matchSources,\n offset_x, offset_y, rot, scale, rms_x, rms_y,\n rms_ra, rms_dec, completed, fit_rms, total_rms, datasetKey,\n status, fit_qual, headerletFile])\n processMsg = None\n #outputTable.pprint(max_width=-1)\n\n return(outputTable)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef generate_msg(filename, msg, key, value):\n\n log.info('Dataset ' + filename + ' has (keyword = value) of (' + key + ' = ' + str(value) + ').')\n if msg == Messages.NOPROC.value:\n log.info('Dataset cannot be aligned.')\n else:\n log.info('Dataset can be aligned, but the result may be compromised.')", "response": "Generate a message for the output log indicating the file and association is consistent with the alignment of the data."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef 
drizCR(input=None, configObj=None, editpars=False, **inputDict):\n log.debug(inputDict)\n inputDict[\"input\"] = input\n configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,\n loadOnly=(not editpars))\n if configObj is None:\n return\n\n if not editpars:\n run(configObj)", "response": "Look for cosmic rays."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _driz_cr(sciImage, virtual_outputs, paramDict):\n grow = paramDict[\"driz_cr_grow\"]\n ctegrow = paramDict[\"driz_cr_ctegrow\"]\n crcorr_list = []\n cr_mask_dict = {}\n\n for chip in range(1, sciImage._numchips + 1, 1):\n exten = sciImage.scienceExt + ',' + str(chip)\n sci_chip = sciImage[exten]\n\n if not sci_chip.group_member:\n continue\n\n blot_image_name = sci_chip.outputNames['blotImage']\n\n if sciImage.inmemory:\n blot_data = sciImage.virtualOutputs[blot_image_name][0].data\n else:\n if not os.path.isfile(blot_image_name):\n raise IOError(\"Blotted image not found: {:s}\"\n .format(blot_image_name))\n\n try:\n blot_data = fits.getdata(blot_image_name, ext=0)\n except IOError:\n print(\"Problem opening blot images\")\n raise\n # Scale blot image, as needed, to match original input data units.\n blot_data *= sci_chip._conversionFactor\n\n input_image = sciImage.getData(exten)\n\n # Apply any unit conversions to input image here for comparison\n # with blotted image in units of electrons\n input_image *= sci_chip._conversionFactor\n\n # make the derivative blot image\n blot_deriv = quickDeriv.qderiv(blot_data)\n\n # Boolean mask needs to take into account any crbits values\n # specified by the user to be ignored when converting DQ array.\n dq_mask = sciImage.buildMask(chip, paramDict['crbit'])\n\n # parse out the SNR information\n snr1, snr2 = map(\n float, filter(None, re.split(\"[,;\\s]+\", paramDict[\"driz_cr_snr\"]))\n )\n\n # parse out the scaling information\n mult1, mult2 = map(\n float, filter(\n None, re.split(\"[,;\\s]+\", 
paramDict[\"driz_cr_scale\"])\n )\n )\n\n gain = sci_chip._effGain\n rn = sci_chip._rdnoise\n backg = sci_chip.subtractedSky * sci_chip._conversionFactor\n\n # Set scaling factor (used by MultiDrizzle) to 1 since scaling has\n # already been accounted for in blotted image\n # expmult = 1.\n\n # ################# COMPUTATION PART I ###################\n # Create a temporary array mask\n t1 = np.absolute(input_image - blot_data)\n # ta = np.sqrt(gain * np.abs((blot_data + backg) * expmult) + rn**2)\n ta = np.sqrt(gain * np.abs(blot_data + backg) + rn**2)\n t2 = (mult1 * blot_deriv + snr1 * ta / gain) # / expmult\n tmp1 = t1 <= t2\n\n # Create a convolution kernel that is 3 x 3 of 1's\n kernel = np.ones((3, 3), dtype=np.uint16)\n # Convolve the mask with the kernel\n tmp2 = signal.convolve2d(tmp1, kernel, boundary='symm', mode='same')\n\n # ################# COMPUTATION PART II ###################\n # Create the CR Mask\n t2 = (mult2 * blot_deriv + snr2 * ta / gain) # / expmult\n cr_mask = (t1 <= t2) | (tmp2 >= 9)\n\n # ################# COMPUTATION PART III ##################\n # flag additional cte 'radial' and 'tail' pixels surrounding CR pixels\n # as CRs\n\n # In both the 'radial' and 'length' kernels below, 0->good and 1->bad,\n # so that upon convolving the kernels with cr_mask, the convolution\n # output will have low->bad and high->good from which 2 new arrays are\n # created having 0->bad and 1->good. 
These 2 new arrays are then\n # 'anded' to create a new cr_mask.\n\n # make radial convolution kernel and convolve it with original cr_mask\n cr_grow_kernel = np.ones((grow, grow), dtype=np.uint16)\n cr_grow_kernel_conv = signal.convolve2d(\n cr_mask, cr_grow_kernel, boundary='symm', mode='same'\n )\n\n # make tail convolution kernel and convolve it with original cr_mask\n cr_ctegrow_kernel = np.zeros((2 * ctegrow + 1, 2 * ctegrow + 1))\n\n # which pixels are masked by tail kernel depends on sign of\n # sci_chip.cte_dir (i.e.,readout direction):\n if sci_chip.cte_dir == 1:\n # 'positive' direction: HRC: amp C or D; WFC: chip = sci,1; WFPC2\n cr_ctegrow_kernel[0:ctegrow, ctegrow] = 1\n elif sci_chip.cte_dir == -1:\n # 'negative' direction: HRC: amp A or B; WFC: chip = sci,2\n cr_ctegrow_kernel[ctegrow+1:2*ctegrow+1, ctegrow] = 1\n\n # do the convolution\n cr_ctegrow_kernel_conv = signal.convolve2d(\n cr_mask, cr_ctegrow_kernel, boundary='symm', mode='same'\n )\n\n # select high pixels from both convolution outputs;\n # then 'and' them to create new cr_mask\n cr_grow_mask = cr_grow_kernel_conv >= grow**2 # radial\n cr_ctegrow_mask = cr_ctegrow_kernel_conv >= ctegrow # length\n cr_mask = cr_grow_mask & cr_ctegrow_mask\n\n # Apply CR mask to the DQ array in place\n dq_mask &= cr_mask\n\n # Create the corr file\n corrFile = np.where(dq_mask, input_image, blot_data)\n corrFile /= sci_chip._conversionFactor\n corrDQMask = np.where(dq_mask, 0, paramDict['crbit']).astype(np.uint16)\n\n if paramDict['driz_cr_corr']:\n crcorr_list.append({\n 'sciext': fileutil.parseExtn(exten),\n 'corrFile': corrFile.copy(),\n 'dqext': fileutil.parseExtn(sci_chip.dq_extn),\n 'dqMask': corrDQMask\n })\n\n # Save the cosmic ray mask file to disk\n cr_mask_image = sci_chip.outputNames[\"crmaskImage\"]\n if paramDict['inmemory']:\n print('Creating in-memory(virtual) FITS file...')\n _pf = util.createFile(cr_mask.astype(np.uint8),\n outfile=None, header=None)\n cr_mask_dict[cr_mask_image] = _pf\n 
sciImage.saveVirtualOutputs(cr_mask_dict)\n\n else:\n # Always write out crmaskimage, as it is required input for\n # the final drizzle step. The final drizzle step combines this\n # image with the DQ information on-the-fly.\n #\n # Remove the existing mask file if it exists\n if os.path.isfile(cr_mask_image):\n os.remove(cr_mask_image)\n print(\"Removed old cosmic ray mask file: '{:s}'\"\n .format(cr_mask_image))\n print(\"Creating output: {:s}\".format(cr_mask_image))\n util.createFile(cr_mask.astype(np.uint8),\n outfile=cr_mask_image, header=None)\n\n if paramDict['driz_cr_corr']:\n createCorrFile(sciImage.outputNames[\"crcorImage\"], crcorr_list,\n sciImage._filename)", "response": "This function is used to generate a Cosmic Ray mask for a given image and virtual_outputs."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncreates a _cor file with the same format as the original input image.", "response": "def createCorrFile(outfile, arrlist, template):\n \"\"\"\n Create a _cor file with the same format as the original input image.\n\n The DQ array will be replaced with the mask array used to create the _cor\n file.\n \"\"\"\n # Remove the existing cor file if it exists\n if os.path.isfile(outfile):\n os.remove(outfile)\n print(\"Removing old corr file: '{:s}'\".format(outfile))\n\n with fits.open(template, memmap=False) as ftemplate:\n for arr in arrlist:\n ftemplate[arr['sciext']].data = arr['corrFile']\n if arr['dqext'][0] != arr['sciext'][0]:\n ftemplate[arr['dqext']].data = arr['dqMask']\n ftemplate.writeto(outfile)\n print(\"Created CR corrected file: '{:s}'\".format(outfile))"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns a dictionary of the default parameters for the current system.", "response": "def setDefaults(configObj={}):\n \"\"\" Return a dictionary of the default parameters\n which also been updated with the user overrides.\n \"\"\"\n paramDict = {\n 'gain': 7, # 
Detector gain, e-/ADU\n 'grow': 1, # Radius around CR pixel to mask [default=1 for\n # 3x3 for non-NICMOS]\n 'ctegrow': 0, # Length of CTE correction to be applied\n 'rn': 5, # Read noise in electrons\n 'snr': '4.0 3.0', # Signal-to-noise ratio\n 'scale': '0.5 0.4', # scaling factor applied to the derivative\n 'backg': 0, # Background value\n 'expkey': 'exptime' # exposure time keyword\n }\n\n if len(configObj) > 0:\n for key in configObj:\n paramDict[key] = configObj[key]\n\n return paramDict"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nprints out syntax help for running astrodrizzle.", "response": "def help(file=None):\n \"\"\"\n Print out syntax help for running ``astrodrizzle``\n\n Parameters\n ----------\n file : str (Default = None)\n If given, write out help to the filename specified by this parameter\n Any previously existing file with this name will be deleted before\n writing out the help.\n\n \"\"\"\n helpstr = getHelpAsString(docstring=True, show_ver=True)\n if file is None:\n print(helpstr)\n else:\n with open(file, mode='w') as f:\n f.write(helpstr)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getHelpAsString(docstring=False, show_ver=True):\n install_dir = os.path.dirname(__file__)\n taskname = util.base_taskname(__taskname__, __package__)\n htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')\n helpfile = os.path.join(install_dir, taskname + '.help')\n\n if docstring or (not docstring and not os.path.exists(htmlfile)):\n if show_ver:\n helpString = \"\\n{:s} Version {:s} updated on {:s}\\n\\n\".format(\n __taskname__, __version__, __version_date__\n )\n else:\n helpString = ''\n\n if os.path.exists(helpfile):\n helpString += teal.getHelpFileAsString(taskname, __file__)\n elif __doc__ is not None:\n helpString += __doc__ + os.linesep\n\n else:\n helpString = 'file://' + htmlfile\n\n return helpString", "response": "Return useful help from a 
file in the script directory called\n "} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nprints out a report of how much memory is needed for this run.", "response": "def reportResourceUsage(imageObjectList, outwcs, num_cores,\n interactive=False):\n \"\"\" Provide some information to the user on the estimated resource\n usage (primarily memory) for this run.\n \"\"\"\n\n from . import imageObject\n if outwcs is None:\n output_mem = 0\n else:\n if isinstance(outwcs,imageObject.WCSObject):\n owcs = outwcs.final_wcs\n else:\n owcs = outwcs\n output_mem = np.prod(owcs.pixel_shape) * 4 * 3 # bytes used for output arrays\n img1 = imageObjectList[0]\n numchips = 0\n input_mem = 0\n for img in imageObjectList:\n numchips += img._nmembers # account for group parameter set by user\n\n # if we have the cpus and s/w, ok, but still allow user to set pool size\n pool_size = util.get_pool_size(num_cores, None)\n pool_size = pool_size if (numchips >= pool_size) else numchips\n\n inimg = 0\n chip_mem = 0\n for img in imageObjectList:\n for chip in range(1,img._numchips+1):\n cmem = img[chip].shape[0]*img[chip].shape[1]*4\n inimg += 1\n if inimg < pool_size:\n input_mem += cmem*2\n if chip_mem == 0:\n chip_mem = cmem\n max_mem = (input_mem + output_mem*pool_size + chip_mem*2)//(1024*1024)\n\n print('*'*80)\n print('*')\n print('* Estimated memory usage: up to %d Mb.'%(max_mem))\n print('* Output image size: {:d} X {:d} pixels. '.format(*owcs.pixel_shape))\n print('* Output image file: ~ %d Mb. 
'%(output_mem//(1024*1024)))\n print('* Cores available: %d'%(pool_size))\n print('*')\n print('*'*80)\n\n if interactive:\n print('Continue with processing?')\n while True:\n if sys.version_info[0] >= 3:\n k = input(\"(y)es or (n)o\").strip()[0].lower()\n else:\n k = raw_input(\"(y)es or (n)o\").strip()[0].lower()\n\n if k not in ['n', 'y']:\n continue\n\n if k == 'n':\n raise KeyboardInterrupt(\"Execution aborted\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef addIVMInputs(imageObjectList,ivmlist):\n if ivmlist is None:\n return\n\n for img,ivmname in zip(imageObjectList,ivmlist):\n img.updateIVMName(ivmname)", "response": "Add IVM filenames provided by user to outputNames dictionary for each input imageObject."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nevaluates the input to determine whether there is 1 or more than 1 valid input file.", "response": "def checkMultipleFiles(input):\n \"\"\" Evaluates the input to determine whether there is 1 or more than 1 valid input file.\n \"\"\"\n f,i,o,a=buildFileList(input)\n return len(f) > 1"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn a list of imageObject instances 1 for each input image in the list of input filenames.", "response": "def createImageObjectList(files,instrpars,group=None,\n undistort=True, inmemory=False):\n \"\"\" Returns a list of imageObject instances, 1 for each input image in the list of input filenames.\n \"\"\"\n imageObjList = []\n mtflag = False\n mt_refimg = None\n for img in files:\n image = _getInputImage(img,group=group)\n image.setInstrumentParameters(instrpars)\n image.compute_wcslin(undistort=undistort)\n if 'MTFLAG' in image._image['PRIMARY'].header:\n # check to see whether we are dealing with moving target observations...\n _keyval = image._image['PRIMARY'].header['MTFLAG']\n if not util.is_blank(_keyval):\n if 
isinstance(_keyval,bool):\n mtflag = _keyval\n else:\n if 'T' in _keyval:\n mtflag = True\n else:\n mtflag = False\n else:\n mtflag = False\n\n if mtflag:\n print(\"#####\\nProcessing Moving Target Observations using reference image as WCS for all inputs!\\n#####\\n\")\n if mt_refimg is None:\n mt_refimg = image\n else:\n image.set_mt_wcs(mt_refimg)\n image.inmemory = inmemory # set flag for inmemory processing\n # Now add (possibly updated) image object to list\n imageObjList.append(image)\n return imageObjList"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef processFilenames(input=None,output=None,infilesOnly=False):\n ivmlist = None\n oldasndict = None\n\n if input is None:\n print(\"No input files provided to processInput\")\n raise ValueError\n\n if not isinstance(input, list) and ('_asn' in input or '_asc' in input):\n # Input is an association table\n # Get the input files, and run makewcs on them\n oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly)\n\n if not infilesOnly:\n if output in [\"\",None,\"None\"]:\n output = oldasndict['output'].lower() # insure output name is lower case\n\n asnhdr = fits.getheader(input, memmap=False)\n # Only perform duplication check if not already completed...\n dupcheck = asnhdr.get('DUPCHECK',default=\"PERFORM\") == \"PERFORM\"\n\n #filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]\n filelist = buildASNList(oldasndict['order'],input,check_for_duplicates=dupcheck)\n\n elif (not isinstance(input, list)) and \\\n (input[0] == '@') :\n # input is an @ file\n f = open(input[1:])\n # Read the first line in order to determine whether\n # IVM files have been specified in a second column...\n line = f.readline()\n f.close()\n # Parse the @-file with irafglob to extract the input filename\n filelist = irafglob.irafglob(input, atfile=util.atfile_sci)\n # If there is a second column...\n if len(line.split()) == 2:\n # ...parse out the names of the IVM 
files as well\n ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm)\n if output in ['',None,\"None\"]:\n if len(filelist) == 1:\n output = fileutil.buildNewRootname(filelist[0])\n else:\n output = 'final'\n else:\n #input is a string or a python list\n try:\n filelist, output = parseinput.parseinput(input, outputname=output)\n if output in ['',None,\"None\"]:\n if len(filelist) == 1:\n output = fileutil.buildNewRootname(filelist[0])\n else:\n output = 'final'\n if not isinstance(input, list):\n filelist.sort()\n except IOError: raise\n\n # sort the list of input files\n # this ensures the list of input files has the same order on all platforms\n # it can have ifferent order because listdir() uses inode order, not unix type order\n #filelist.sort()\n\n\n\n return filelist, output, ivmlist, oldasndict", "response": "Process the input string which contains the input file information and return a filelist and output\nAttributeNames"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nprocesses input list of filenames and return a list of dictionaries and the full output list of filenames.", "response": "def process_input(input, output=None, ivmlist=None, updatewcs=True,\n prodonly=False, wcskey=None, **workinplace):\n \"\"\"\n Create the full input list of filenames after verifying and converting\n files as needed.\n \"\"\"\n\n newfilelist, ivmlist, output, oldasndict, origflist = buildFileListOrig(\n input, output=output, ivmlist=ivmlist, wcskey=wcskey,\n updatewcs=updatewcs, **workinplace)\n\n if not newfilelist:\n buildEmptyDRZ(input, output)\n return None, None, output\n\n # run all WCS updating -- Now done in buildFileList\n #pydr_input = _process_input_wcs(newfilelist, wcskey, updatewcs)\n pydr_input = newfilelist\n\n # AsnTable will handle the case when output==None\n if not oldasndict:# and output is not None:\n oldasndict = asnutil.ASNTable(pydr_input, output=output)\n oldasndict.create()\n\n asndict = 
update_member_names(oldasndict, pydr_input)\n asndict['original_file_names'] = origflist\n\n # Build output filename\n drz_extn = '_drz.fits'\n for img in newfilelist:\n # special case logic to automatically recognize when _flc.fits files\n # are provided as input and produce a _drc.fits file instead\n if '_flc.fits' in img:\n drz_extn = '_drc.fits'\n break\n\n if output in [None,'']:\n output = fileutil.buildNewRootname(asndict['output'],\n extn=drz_extn)\n else:\n if '.fits' in output.lower():\n pass\n elif drz_extn[:4] not in output.lower():\n output = fileutil.buildNewRootname(output, extn=drz_extn)\n\n\n log.info('Setting up output name: %s' % output)\n\n return asndict, ivmlist, output"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nprocess input files using the WCS key.", "response": "def _process_input_wcs(infiles, wcskey, updatewcs):\n \"\"\"\n This is a subset of process_input(), for internal use only. This is the\n portion of input handling which sets/updates WCS data, and is a performance\n hit - a target for parallelization. 
Returns the expanded list of filenames.\n \"\"\"\n\n # Run parseinput though it's likely already been done in processFilenames\n outfiles = parseinput.parseinput(infiles)[0]\n\n # Disable parallel processing here for now until hardware I/O gets \"wider\".\n # Since this part is IO bound, parallelizing doesn't help more than a little\n # in most cases, and may actually slow this down on some desktop nodes.\n# cfgval_num_cores = None # get this from paramDict\n# pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles))\n pool_size = 1\n\n # do the WCS updating\n if wcskey in ['', ' ', 'INDEF', None]:\n if updatewcs:\n log.info('Updating input WCS using \"updatewcs\"')\n else:\n log.info('Resetting input WCS to be based on WCS key = %s' % wcskey)\n\n if pool_size > 1:\n log.info('Executing %d parallel workers' % pool_size)\n subprocs = []\n for fname in outfiles:\n p = multiprocessing.Process(target=_process_input_wcs_single,\n name='processInput._process_input_wcs()', # for err msgs\n args=(fname, wcskey, updatewcs) )\n subprocs.append(p)\n mputil.launch_and_wait(subprocs, pool_size) # blocks till all done\n else:\n log.info('Executing serially')\n for fname in outfiles:\n _process_input_wcs_single(fname, wcskey, updatewcs)\n\n return outfiles"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nprocessing input WCSs for a single image.", "response": "def _process_input_wcs_single(fname, wcskey, updatewcs):\n \"\"\"\n See docs for _process_input_wcs.\n This is separated to be spawned in parallel.\n \"\"\"\n if wcskey in ['', ' ', 'INDEF', None]:\n if updatewcs:\n uw.updatewcs(fname, checkfiles=False)\n else:\n numext = fileutil.countExtn(fname)\n extlist = []\n for extn in range(1, numext + 1):\n extlist.append(('SCI', extn))\n if wcskey in string.ascii_uppercase:\n wkey = wcskey\n wname = ' '\n else:\n wname = wcskey\n wkey = ' '\n altwcs.restoreWCS(fname, extlist, wcskey=wkey, wcsname=wname)\n # make an asn table at the end\n # Make sure 
there is a WCSCORR table for each input image\n if wcskey not in ['', ' ', 'INDEF', None] or updatewcs:\n wcscorr.init_wcscorr(fname)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nbuild a file list which has undergone various instrument - specific checks for input to MultiDrizzle including splitting STIS associations.", "response": "def buildFileList(input, output=None, ivmlist=None,\n wcskey=None, updatewcs=True, **workinplace):\n \"\"\"\n Builds a file list which has undergone various instrument-specific\n checks for input to MultiDrizzle, including splitting STIS associations.\n \"\"\"\n newfilelist, ivmlist, output, oldasndict, filelist = \\\n buildFileListOrig(input=input, output=output, ivmlist=ivmlist,\n wcskey=wcskey, updatewcs=updatewcs, **workinplace)\n return newfilelist, ivmlist, output, oldasndict"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef buildFileListOrig(input, output=None, ivmlist=None,\n wcskey=None, updatewcs=True, **workinplace):\n \"\"\"\n Builds a file list which has undergone various instrument-specific\n checks for input to MultiDrizzle, including splitting STIS associations.\n Compared to buildFileList, this version returns the list of the\n original file names as specified by the user (e.g., before GEIS->MEF, or\n WAIVER FITS->MEF conversion).\n \"\"\"\n # NOTE: original file name is required in order to correctly associate\n # user catalog files (e.g., user masks to be used with 'skymatch') with\n # corresponding imageObjects.\n\n filelist, output, ivmlist, oldasndict = processFilenames(input,output)\n\n # verify that all input images specified can be updated as needed\n filelist = util.verifyFilePermissions(filelist)\n if filelist is None or len(filelist) == 0:\n return None, None, None, None, None\n\n manageInputCopies(filelist,**workinplace)\n\n # to keep track of the original file names we do the following trick:\n # pack 
filelist with the ivmlist using zip and later unpack the zipped list.\n #\n # NOTE: this required a small modification of the checkStisFiles function\n # in stsci.tools.check_files to be able to handle ivmlists that are tuples.\n if ivmlist is None:\n ivmlist = len(filelist)*[None]\n else:\n assert(len(filelist) == len(ivmlist)) #TODO: remove after debugging\n ivmlist = list(zip(ivmlist,filelist))\n\n # Check format of FITS files - convert Waiver/GEIS to MEF if necessary\n filelist, ivmlist = check_files.checkFITSFormat(filelist, ivmlist)\n\n # check for non-polynomial distortion correction\n if not updatewcs:\n # with updatewcs turned on, any problems will get resolved\n # so we do not need to be concerned about the state of the DGEOFILEs\n filelist = checkDGEOFile(filelist)\n\n # run all WCS updating\n updated_input = _process_input_wcs(filelist, wcskey, updatewcs)\n\n newfilelist, ivmlist = check_files.checkFiles(updated_input, ivmlist)\n\n if updatewcs:\n uw.updatewcs(','.join(set(newfilelist) - set(filelist)))\n\n if len(ivmlist) > 0:\n ivmlist, filelist = list(zip(*ivmlist))\n else:\n filelist = [] # insure that both filelist and ivmlist are defined as empty lists\n\n return newfilelist, ivmlist, output, oldasndict, filelist", "response": "Builds a list of original file names for a multi - drizzle input."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef buildASNList(rootnames, asnname, check_for_duplicates=True):\n\n # Recognize when multiple valid inputs with the same rootname are present\n # this would happen when both CTE-corrected (_flc) and non-CTE-corrected (_flt)\n # products are in the same directory as an ASN table\n filelist, duplicates = checkForDuplicateInputs(rootnames)\n\n if check_for_duplicates and duplicates:\n # Build new ASN tables for each set of input files\n origasn = changeSuffixinASN(asnname, 'flt')\n dupasn = changeSuffixinASN(asnname, 'flc')\n\n errstr = 
'ERROR:\\nMultiple valid input files found:\\n'\n for fname, dname in zip(filelist, duplicates):\n errstr += ' %s %s\\n' % (fname, dname)\n errstr += ('\\nNew association files have been generated for each '\n 'version of these files.\\n %s\\n %s\\n\\nPlease '\n 're-start astrodrizzle using of these new ASN files or '\n 'use widlcards for the input to only select one type of '\n 'input file.' % (dupasn, origasn))\n\n print(textutil.textbox(errstr), file=sys.stderr)\n\n # generate new ASN files for each case,\n # report this case of duplicate inputs to the user then quit\n raise ValueError\n\n return filelist", "response": "Build a list of filenames for a given set of rootnames."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef changeSuffixinASN(asnfile, suffix):\n # Start by creating a new name for the ASN table\n _new_asn = asnfile.replace('_asn.fits','_'+suffix+'_asn.fits')\n if os.path.exists(_new_asn):\n os.remove(_new_asn)\n # copy original ASN table to new table\n shutil.copy(asnfile,_new_asn)\n\n # Open up the new copy and convert all MEMNAME's to include suffix\n fasn = fits.open(_new_asn, mode='update', memmap=False)\n fasn[0].header['DUPCHECK'] = \"COMPLETE\"\n newdata = fasn[1].data.tolist()\n for i in range(len(newdata)):\n val = newdata[i][0].decode(encoding='UTF-8').strip()\n if 'prod' not in newdata[i][1].decode(encoding='UTF-8').lower():\n val += '_'+suffix\n newdata[i] = (val,newdata[i][1].strip(),newdata[i][2])\n\n # Redefine dtype to support longer strings for MEMNAME\n new_dtype = []\n d = fasn[1].data.dtype\n msize = d.descr[0][1][1:]\n new_size = int(msize[1:])+8\n mtype = msize[0]\n new_dtype.append((d.descr[0][0],d.descr[0][1].replace(msize,'{}{}'.format(mtype,new_size))))\n new_dtype.append(d.descr[1])\n new_dtype.append(d.descr[2])\n\n # Assign newly created, reformatted array to extension\n newasn = np.array(newdata,dtype=new_dtype)\n fasn[1].data = newasn\n fasn.close()\n\n 
return _new_asn", "response": "Change the suffix in the ASN file to include the suffix."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncheck input files specified in ASN table for duplicate versions with multiple valid suffixes.", "response": "def checkForDuplicateInputs(rootnames):\n \"\"\"\n Check input files specified in ASN table for duplicate versions with\n multiple valid suffixes (_flt and _flc, for example).\n \"\"\"\n\n flist = []\n duplist = []\n\n for fname in rootnames:\n # Look for any recognized CTE-corrected products\n f1 = fileutil.buildRootname(fname,ext=['_flc.fits'])\n f2 = fileutil.buildRootname(fname)\n flist.append(f2)\n if os.path.exists(f1) and f1 != f2:\n # More than 1 valid input found for this rootname\n duplist.append(f1)\n\n return flist,duplist"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nresets CR bits in each input image s DQ array", "response": "def resetDQBits(imageObjectList, cr_bits_value=4096):\n \"\"\"Reset the CR bit in each input image's DQ array\"\"\"\n\n if cr_bits_value > 0:\n for img in imageObjectList:\n for chip in range(1,img._numchips+1,1):\n sci_chip = img._image[img.scienceExt,chip]\n resetbits.reset_dq_bits(sci_chip.dqfile, cr_bits_value,\n extver=chip, extname=sci_chip.dq_extn)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate names in a member dictionary.", "response": "def update_member_names(oldasndict, pydr_input):\n \"\"\"\n Update names in a member dictionary.\n\n Given an association dictionary with rootnames and a list of full\n file names, it will update the names in the member dictionary to\n contain '_*' extension. 
For example a rootname of 'u9600201m' will\n be replaced by 'u9600201m_c0h' making sure that a MEf file is passed\n as an input and not the corresponding GEIS file.\n \"\"\"\n\n omembers = oldasndict['members'].copy()\n nmembers = {}\n translated_names = [f.split('.fits')[0] for f in pydr_input]\n\n newkeys = [fileutil.buildNewRootname(file) for file in pydr_input]\n keys_map = list(zip(newkeys, pydr_input))\n\n for okey, oval in list(omembers.items()):\n if okey in newkeys:\n nkey = pydr_input[newkeys.index(okey)]\n nmembers[nkey.split('.fits')[0]] = oval\n\n oldasndict.pop('members')\n # replace should be always True to cover the case when flt files were removed\n # and the case when names were translated\n\n oldasndict.update(members=nmembers, replace=True)\n oldasndict['order'] = translated_names\n return oldasndict"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef manageInputCopies(filelist, **workinplace):\n\n # Find out what directory is being used for processing\n workingdir = os.getcwd()\n # Only create sub-directory for copies of inputs, if copies are requested\n # Create name of sub-directory for copies\n origdir = os.path.join(workingdir,'OrIg_files')\n if workinplace['overwrite'] or workinplace['preserve']:\n # if sub-directory does not exist yet, create it\n if not os.path.exists(origdir):\n os.mkdir(origdir)\n\n printMsg = True\n # check to see if copies already exist for each file\n for fname in filelist:\n copymade = False # If a copy is made, no need to restore\n copyname = os.path.join(origdir,fname)\n short_copyname = os.path.join('OrIg_files',fname)\n if workinplace['overwrite']:\n print('Forcibly archiving original of: ',fname, 'as ',short_copyname)\n # make a copy of the file in the sub-directory\n if os.path.exists(copyname): os.chmod(copyname, 438) # octal 666\n shutil.copy(fname,copyname)\n os.chmod(copyname,292) # octal 444 makes files read-only\n if printMsg:\n print('\\nTurning OFF 
\"preserve\" and \"restore\" actions...\\n')\n printMsg = False # We only need to print this one time...\n copymade = True\n\n if (workinplace['preserve'] and not os.path.exists(copyname)) \\\n and not workinplace['overwrite']:\n # Preserving a copy of the input, but only if not already archived\n print('Preserving original of: ',fname, 'as ',short_copyname)\n # make a copy of the file in the sub-directory\n shutil.copy(fname,copyname)\n os.chmod(copyname,292) # octal 444 makes files read-only\n copymade = True\n\n if 'restore' in workinplace and not copymade:\n if (os.path.exists(copyname) and workinplace['restore']) and not workinplace['overwrite']:\n print('Restoring original input for ',fname,' from ',short_copyname)\n # replace current files with original version\n os.chmod(fname, 438) # octal 666\n shutil.copy(copyname, fname)\n os.chmod(fname, 438)", "response": "Manage input copies of all input images in a sub - directory."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef buildEmptyDRZ(input, output):\n\n # Identify the first input image\n inputfile = parseinput.parseinput(input)[0]\n if not inputfile:\n print('\\n******* ERROR *******', file=sys.stderr)\n print(\n 'No input file found! Check specification of parameter '\n '\"input\". ', file=sys.stderr)\n print('Quitting...', file=sys.stderr)\n print('******* ***** *******\\n', file=sys.stderr)\n return # raise IOError, \"No input file found!\"\n\n # Set up output file here...\n if output is None:\n if len(input) == 1:\n oname = fileutil.buildNewRootname(input[0])\n else:\n oname = 'final'\n output = fileutil.buildNewRootname(oname, extn='_drz.fits')\n else:\n if 'drz' not in output:\n output = fileutil.buildNewRootname(output, extn='_drz.fits')\n\n log.info('Setting up output name: %s' % output)\n\n # Open the first image (of the excludedFileList?) 
to use as a template to build\n # the DRZ file.\n try :\n log.info('Building empty DRZ file from %s' % inputfile[0])\n img = fits.open(inputfile[0], memmap=False)\n except:\n raise IOError('Unable to open file %s \\n' % inputfile)\n\n # Create the fitsobject\n fitsobj = fits.HDUList()\n # Copy the primary header\n hdu = img[0].copy()\n fitsobj.append(hdu)\n\n # Modify the 'NEXTEND' keyword of the primary header to 3 for the\n #'sci, wht, and ctx' extensions of the newly created file.\n fitsobj[0].header['NEXTEND'] = 3\n\n # Create the 'SCI' extension\n hdu = fits.ImageHDU(header=img['sci', 1].header.copy())\n hdu.header['EXTNAME'] = 'SCI'\n fitsobj.append(hdu)\n\n # Create the 'WHT' extension\n hdu = fits.ImageHDU(header=img['sci', 1].header.copy())\n hdu.header['EXTNAME'] = 'WHT'\n fitsobj.append(hdu)\n\n # Create the 'CTX' extension\n hdu = fits.ImageHDU(header=img['sci', 1].header.copy())\n hdu.header['EXTNAME'] = 'CTX'\n fitsobj.append(hdu)\n\n # Add HISTORY comments explaining the creation of this file.\n fitsobj[0].header.add_history(\"** AstroDrizzle has created this empty \"\n \"DRZ product because**\")\n fitsobj[0].header.add_history(\"** all input images were excluded from \"\n \"processing.**\")\n\n\n # Change the filename in the primary header to reflect the name of the output\n # filename.\n fitsobj[0].header['FILENAME'] = str(output) # +\"_drz.fits\"\n\n # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT\n fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0])\n # Modify the ASN_MTYP keyword to contain \"PROD-DTH\" so it can be properly\n # ingested into the archive catalog.\n\n # stis has this keyword in the [1] header, so I am directing the code\n #t o first look in the primary, then the 1\n try:\n fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH'\n except:\n fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH'\n\n # If the file is already on disk delete it and replace it with the\n # new file\n dirfiles = os.listdir(os.curdir)\n if 
dirfiles.count(output) > 0:\n os.remove(output)\n log.info(\" Replacing %s...\" % output)\n\n # Write out the empty DRZ file\n fitsobj.writeto(output)\n\n print(textutil.textbox(\n 'ERROR:\\nAstroDrizzle has created an empty DRZ product because all '\n 'input images were excluded from processing or a user requested the '\n 'program to stop.') + '\\n', file=sys.stderr)\n\n return", "response": "This module creates an empty DRZ file from a list of excluded files."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nverify that input file has been updated with NPOLFILE This function checks for the presence of 'NPOLFILE' kw in the primary header when 'DGEOFILE' kw is present and valid (i.e. 'DGEOFILE' is not blank or 'N/A'). It handles the case of science files downloaded from the archive before the new software was installed there. If 'DGEOFILE' is present and 'NPOLFILE' is missing, print a message and let the user choose whether to (q)uit and update the headers or (c)ontinue and run astrodrizzle without the non-polynomial correction. 'NPOLFILE' will be populated in the pipeline before astrodrizzle is run. In the case of WFPC2 the old style dgeo files are used to create detector to image correction at runtime. Parameters ---------- filenames : list of str file names of all images to be checked", "response": "def checkDGEOFile(filenames):\n \"\"\"\n Verify that input file has been updated with NPOLFILE\n\n This function checks for the presence of 'NPOLFILE' kw in the primary header\n when 'DGEOFILE' kw is present and valid (i.e. 
'DGEOFILE' is not blank or 'N/A').\n It handles the case of science files downloaded from the archive before the new\n software was installed there.\n If 'DGEOFILE' is present and 'NPOLFILE' is missing, print a message and let the user\n choose whether to (q)uit and update the headers or (c)ontinue and run astrodrizzle\n without the non-polynomial correction.\n 'NPOLFILE' will be populated in the pipeline before astrodrizzle is run.\n\n In the case of WFPC2 the old style dgeo files are used to create detector to image\n correction at runtime.\n\n Parameters\n ----------\n filenames : list of str\n file names of all images to be checked\n\n \"\"\"\n\n msg = \"\"\"\n A 'DGEOFILE' keyword is present in the primary header but 'NPOLFILE' keyword was not found.\n This version of the software uses a new format for the residual distortion DGEO files.\n Please consult the instrument web pages for which reference files to download.\n A small (new style) dgeofile is needed ('_npl.fits' extension) and possibly a\n detector to image correction file ('_d2i.fits' extension).\n The names of these files must be added to the primary header either using the task XXXX\n or manually, for example:\n\n hedit {0:s}[0] npolfile fname_npl.fits add+\n hedit {0:s}[0] d2imfile fname_d2i.fits add+\n\n where fname_npl.fits is the name of the new style dgeo file and fname_d2i.fits is\n the name of the detector to image correction. 
After adding these keywords to the\n primary header, updatewcs must be run to update the science files:\n\n from stwcs import updatewcs\n updatewcs.updatewcs(\"{0:s}\")\n\n Alternatively you may choose to run astrodrizzle without DGEO and detector to image correction.\n\n To stop astrodrizzle and update the dgeo files, type 'q'.\n To continue running astrodrizzle without the non-polynomial distortion correction, type 'c':\n \"\"\"\n\n short_msg = \"\"\"\n To stop astrodrizzle and update the dgeo files, type 'q'.\n To continue running astrodrizzle without the non-polynomial distortion correction, type 'c':\n \"\"\"\n\n for inputfile in filenames:\n try:\n dgeofile = fits.getval(inputfile, 'DGEOFILE', memmap=False)\n except KeyError:\n continue\n if dgeofile not in [\"N/A\", \"n/a\", \"\"]:\n message = msg.format(inputfile)\n try:\n npolfile = fits.getval(inputfile, 'NPOLFILE', memmap=False)\n except KeyError:\n ustop = userStop(message)\n while ustop is None:\n ustop = userStop(short_msg)\n if ustop:\n return None\n\n return filenames"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ndefining full set of default values for unit - testing this module.", "response": "def _setDefaults(input_dict={}):\n \"\"\" Define full set of default values for unit-testing this module.[OBSOLETE]\"\"\"\n paramDict = {\n 'input':'*flt.fits',\n 'output':None,\n 'mdriztab':None,\n 'refimage':None,\n 'runfile':None,\n 'workinplace':False,\n 'updatewcs':True,\n 'proc_unit':'native',\n 'coeffs':True,\n 'context':False,\n 'clean':True,\n 'group':None,\n 'ra':None,\n 'dec':None,\n 'build':True,\n 'gain':None,\n 'gnkeyword':None,\n 'readnoise':None,\n 'rnkeyword':None,\n 'exptime':None,\n 'expkeyword':None,\n 'crbitval':4096,\n 'static':True,\n 'static_sig':4.0,\n 'skysub':True,\n 'skymethod':\"globalmin+match\",\n 'skystat':\"median\",\n 'skywidth':0.1,\n 'skylower':None,\n 'skyupper':None,\n 'skyclip':5,\n 'skylsigma':4.0,\n 'skyusigma':4.0,\n 
\"skymask_cat\":\"\",\n \"use_static\":True,\n \"sky_bits\":0,\n \"skyuser\":\"\",\n \"skyfile\":\"\",\n 'driz_separate':True,\n 'driz_sep_outnx':None,\n 'driz_sep_outny':None,\n 'driz_sep_crpix1':None,\n 'driz_sep_crpix2':None,\n 'driz_sep_kernel':'turbo',\n 'driz_sep_scale':None,\n 'driz_sep_pixfrac':1.0,\n 'driz_sep_rot':None,\n 'driz_sep_fillval':None,\n 'driz_sep_bits':0,\n 'median':True,\n 'median_newmasks':True,\n 'combine_type':\"minmed\",\n 'combine_nsigma':\"4 3\",\n 'combine_nlow':0,\n 'combine_nhigh':1,\n 'combine_lthresh':None,\n 'combine_hthresh':None,\n 'combine_grow':1,\n 'blot':True,\n 'blot_interp':'poly5',\n 'blot_sinscl':1.0,\n 'driz_cr':True,\n 'driz_cr_corr':False,\n 'driz_cr_snr':\"3.5 3.0\",\n 'driz_cr_scale':\"1.2 0.7\",\n 'driz_cr_cteg':0,\n 'driz_cr_grow':1,\n 'driz_combine':True,\n 'final_wht_type':\"EXP\",\n 'final_outnx':None,\n 'final_outny':None,\n 'final_crpix1':None,\n 'final_crpix2':None,\n 'final_kernel':'square',\n 'final_scale':None,\n 'final_pixfrac':1.0,\n 'final_rot':None,\n 'final_fillval':None,\n 'final_bits':0}\n\n paramDict.update(input_dict)\n\n print('\\nUser Input Parameters for Init Step:')\n util.printParams(paramDict)\n\n return paramDict"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn an array representing the dark image for the detector.", "response": "def getdarkimg(self,chip):\n \"\"\"\n Return an array representing the dark image for the detector.\n\n Returns\n -------\n dark : array\n The dark array in the same shape as the image with **units of cps**.\n\n \"\"\"\n\n # Read the temperature dependeant dark file. 
The name for the file is taken from\n # the TEMPFILE keyword in the primary header.\n tddobj = readTDD.fromcalfile(self.name)\n\n if tddobj is None:\n return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent()\n else:\n # Create Dark Object from AMPGLOW and Lineark Dark components\n darkobj = tddobj.getampglow() + tddobj.getlindark()\n\n # Return the darkimage taking into account an subarray information available\n return darkobj[self.ltv2:self.size2,self.ltv1:self.size1]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef isCountRate(self):\n has_bunit = False\n if 'BUNIT' in self._image['sci',1].header :\n has_bunit = True\n\n countrate = False\n if (self._image[0].header['UNITCORR'].strip() == 'PERFORM') or \\\n (has_bunit and self._image['sci',1].header['bunit'].find('/') != -1) :\n countrate = True\n\n return countrate", "response": "Method or IRInputObject used to indicate if the input\n science data is in units of counts or count rate."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef photeq(files='*_flt.fits', sciext='SCI', errext='ERR',\n ref_phot=None, ref_phot_ext=None,\n phot_kwd='PHOTFLAM', aux_phot_kwd='PHOTFNU',\n search_primary=True,\n readonly=True, clobber=False, logfile='photeq.log'):\n \"\"\"\n Adjust data values of images by equalizing each chip's PHOTFLAM value\n to a single common value so that all chips can be treated equally\n by ``AstroDrizzle``.\n\n Parameters\n ----------\n\n files : str (Default = ``'*_flt.fits'``)\n\n A string containing one of the following:\n\n * a comma-separated list of valid science image file names,\n e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;\n\n * an @-file name, e.g., ``'@files_to_match.txt'``. See notes\n section for details on the format of the @-files.\n\n .. 
note::\n\n **Valid science image file names** are:\n\n * file names of existing FITS, GEIS, or WAIVER FITS files;\n\n * partial file names containing wildcard characters, e.g.,\n ``'*_flt.fits'``;\n\n * Association (ASN) tables (must have ``_asn``, or ``_asc``\n suffix), e.g., ``'j12345670_asn.fits'``.\n\n sciext : str (Default = 'SCI')\n Extension *name* of extensions whose data and/or headers should\n be corrected.\n\n errext : str (Default = 'ERR')\n Extension *name* of the extensions containing corresponding error\n arrays. Error arrays are corrected in the same way as science data.\n\n ref_phot : float, None (Default = None)\n A number indicating the new value of PHOTFLAM or PHOTFNU\n (set by 'phot_kwd') to which the data should be adjusted.\n\n ref_phot_ext : int, str, tuple, None (Default = None)\n Extension from which the `photeq` should get the reference photometric\n value specified by the `phot_kwd` parameter. This parameter is ignored\n if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then\n the reference inverse sensitivity value will be picked from the\n first `sciext` of the first input image containing `phot_kwd`.\n\n phot_kwd : str (Default = 'PHOTFLAM')\n Specifies the primary keyword which contains inverse sensitivity\n (e.g., PHOTFLAM). It is used to compute conversion factors by\n which data should be rescaled.\n\n aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')\n Same as `phot_kwd` but describes *other* photometric keyword(s)\n that should be corrected by inverse of the scale factor used to correct\n data. These keywords are *not* used to compute conversion factors.\n Multiple keywords can be specified as a Python list of strings:\n ``['PHOTFNU', 'PHOTOHMY']``.\n\n .. 
note::\n\n If specifying multiple secondary photometric keywords in the TEAL\n interface, use a comma-separated list of keywords.\n\n search_primary : bool (Default = True)\n Specifies whether to first search the primary header for the\n presence of `phot_kwd` keyword and compute conversion factor based on\n that value. This is (partially) ignored when `ref_phot` is not `None` in\n the sense that the value specified by `ref_phot` will be used as the\n reference *but* in all images primary will be searched for `phot_kwd`\n and `aux_phot_kwd` and those values will be corrected\n (if ``search_primary=True``).\n\n readonly : bool (Default = True)\n If `True`, `photeq` will not modify input files (nevertheless, it will\n convert input GEIS or WAVERED FITS files to MEF and could overwrite\n existing MEF files if `clobber` is set to `True`).\n The (console or log file) output however will be identical to the case\n when ``readonly=False`` and it can be examined before applying these\n changes to input files.\n\n clobber : bool (Default = False)\n Overwrite existing MEF files when converting input WAVERED FITS or GEIS\n to MEF.\n\n logfile : str, None (Default = 'photeq.log')\n File name of the log file.\n\n Notes\n -----\n\n By default, `photeq` will search for the first inverse sensitivity\n value (given by the header keyword specified by the `phot_kwd` parameter,\n e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize\n all other images to this reference value.\n\n It is possible to tell `photeq` to look for the reference inverse\n sensitivity value only in a specific extension of input images, e.g.: 3,\n ('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific\n extension. 
This may be useful, for example, for WFPC2 images: WF3 chip was\n one of the better calibrated chips, and so, if one prefers to have\n inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,\n one can set ``ref_phot_ext=3``.\n\n Alternatively, one can provide their own reference inverse sensitivity\n value to which all other images should be \"equalized\" through the\n parameter `ref_phot`.\n\n .. note::\n\n Default parameter values (except for `files`, `readonly`, and `clobber`)\n should be acceptable for most HST images.\n\n .. warning::\n\n If images are intended to be used with ``AstroDrizzle``, it is\n recommended that sky background measurement be performed on \"equalized\"\n images as the `photeq` is not aware of sky user keyword in the image\n headers and thus it cannot correct sky values already recorded in the\n headers.\n\n Examples\n --------\n\n #. In most cases the default parameters should suffice:\n\n >>> from drizzlepac import photeq\n >>> photeq.photeq(files='*_flt.fits', readonly=False)\n\n #. If the re-calibration needs to be done on PHOTFNU rather than\n PHOTFLAM, then:\n\n >>> photeq.photeq(files='*_flt.fits', ref_phot='PHOTFNU',\n ... aux_phot_kwd='PHOTFLAM')\n\n #. 
If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the\n reference in WFPC2 images, then:\n\n >>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)\n\n \"\"\"\n\n # Time it\n runtime_begin = datetime.now()\n\n # check that input file name is a string:\n if not isinstance(files, str):\n raise TypeError(\"Argument 'files' must be a comma-separated list of \"\n \" file names\")\n\n # Set-up log files:\n if isinstance(logfile, str):\n # first, in case there are any \"leftover\" file handlers,\n # close and remove them:\n for h in _log.handlers:\n if h is not _sh_log and isinstance(h, logging.FileHandler):\n h.close()\n _log.removeHandler(h)\n # create file handler:\n log_formatter = logging.Formatter('[%(levelname)s:] %(message)s')\n log_file_handler = logging.FileHandler(logfile)\n log_file_handler.setFormatter(log_formatter)\n # add log_file_handler to logger\n _log.addHandler(log_file_handler)\n\n elif logfile is not None:\n raise TypeError(\"Unsupported 'logfile' type\")\n\n # BEGIN:\n _mlinfo(\"***** {0} started on {1}\".format(__taskname__, runtime_begin))\n _mlinfo(\" Version {0} ({1})\".format(__version__, __version_date__))\n\n # check that extension names are strings (or None for error ext):\n if sciext is None:\n sci_ext4parse = '*'\n ext2get = None\n else:\n if not isinstance(sciext, str):\n raise TypeError(\"Argument 'sciext' must be a string or None\")\n sciext = sciext.strip()\n if sciext.upper() == 'PRIMARY':\n sciext = sciext.upper()\n ext2get = (sciext, 1)\n else:\n ext2get = (sciext, '*')\n\n sci_ext4parse = ext2get\n\n if errext is not None and not isinstance(errext, str):\n raise TypeError(\"Argument 'errext' must be a string or None\")\n\n # check that phot_kwd is supported:\n if not isinstance(phot_kwd, str):\n raise TypeError(\"Argument 'phot_kwd' must be a string\")\n phot_kwd = phot_kwd.strip().upper()\n\n # check that ref_phot_ext has correct type:\n if ref_phot_ext is not None and not \\\n 
(isinstance(ref_phot_ext, int) or isinstance(ref_phot_ext, str) \\\n or (isinstance(ref_phot_ext, tuple) and len(ref_phot_ext) == 2 \\\n and isinstance(ref_phot_ext[0], str) and \\\n isinstance(ref_phot_ext[1], int))):\n raise TypeError(\"Unsupported 'ref_phot_ext' type\")\n if isinstance(ref_phot_ext, str):\n ref_phot_ext = (ref_phot_ext, 1)\n\n if aux_phot_kwd is None:\n aux_phot_kwd = []\n\n elif isinstance(aux_phot_kwd, str):\n aux_phot_kwd = [aux_phot_kwd.strip().upper()]\n if phot_kwd == aux_phot_kwd:\n raise ValueError(\"Auxiliary photometric keyword must be different \"\n \"from the main photometric keyword 'phot_kwd'.\")\n\n elif hasattr(aux_phot_kwd, '__iter__'):\n if not all([isinstance(phot, str) for phot in aux_phot_kwd]):\n raise TypeError(\"Argument 'aux_phot_kwd' must be a string, list of \"\n \"strings, or None\")\n aux_phot_kwd = [phot.strip().upper() for phot in aux_phot_kwd]\n if ref_phot in aux_phot_kwd:\n raise ValueError(\"Auxiliary photometric keyword(s) must be \"\n \"different from the main photometric keyword \"\n \"'phot_kwd'.\")\n\n else:\n raise TypeError(\"Argument 'aux_phot_kwd' must be a string, list of \"\n \"strings, or None\")\n\n # read input file list:\n fl = parseat.parse_cs_line(csline=files, default_ext=sci_ext4parse,\n im_fmode='readonly' if readonly else 'update',\n clobber=clobber, fnamesOnly=True,\n doNotOpenDQ=True)\n\n # check if user supplied file extensions, set them to the sciext,\n # and warn that they will be ignored:\n for f in fl:\n if f.count > 1 or f.fext[0] != sci_ext4parse:\n _mlwarn(\"WARNING: Extension specifications for file {:s} \"\n \"will be ignored. 
Using all {:s} extensions instead.\"\n .format(f.image, 'image-like' if sciext is None else \\\n \"{:s}\".format(utils.ext2str(sciext,\n default_extver=None))))\n\n # find the reference PHOTFLAM/PHOTNU:\n flc = fl[:]\n ref_hdu = None\n ref_ext = None\n ref_user = True\n\n if ref_phot is None:\n ref_user = False\n for f in flc:\n f.convert2ImageRef()\n\n # get primary hdu:\n pri_hdu = f.image.hdu[0]\n\n # find all valid extensions:\n if ref_phot_ext is None:\n if sciext == 'PRIMARY':\n extnum = [0]\n else:\n extnum = utils.get_ext_list(f.image, sciext)\n\n is_pri_hdu = [f.image.hdu[ext] is pri_hdu for ext in extnum]\n\n # if necessary, add primary header to the hdu list:\n if search_primary:\n try:\n pri_index = is_pri_hdu.index(True)\n extnum.insert(0, extnum.pop(pri_index))\n except ValueError:\n extnum.insert(0, 0)\n\n else:\n extnum = [ref_phot_ext]\n\n for ext in extnum:\n hdu = f.image.hdu[ext]\n if phot_kwd in hdu.header:\n ref_phot = hdu.header[phot_kwd]\n ref_ext = ext\n ref_hdu = hdu\n break\n\n if ref_phot is None:\n _mlwarn(\"WARNING: Could not find specified inverse \"\n \" sensitivity keyword '{:s}'\\n\"\n \" in any of the {} extensions of file '{}'.\\n\"\n \" This input file will be ignored.\"\n .format(phot_kwd, 'image-like' if sciext is None else \\\n \"{:s}\".format(utils.ext2str(sciext,\n default_extver=None)),\n os.path.basename(f.image.original_fname)))\n f.release_all_images()\n fl.remove(f)\n\n else:\n break\n\n if ref_phot is None:\n raise RuntimeError(\"Could not find the inverse sensitivity keyword \"\n \"'{:s}' in the specified headers of \"\n \"the input image(s).\\nCannot continue.\"\n .format(phot_kwd))\n\n aux_phot_kwd_list = ','.join(aux_phot_kwd)\n\n _mlinfo(\"\\nPRIMARY PHOTOMETRIC KEYWORD: {:s}\".format(phot_kwd))\n _mlinfo(\"SECONDARY PHOTOMETRIC KEYWORD(S): {:s}\"\n .format(aux_phot_kwd_list if aux_phot_kwd_list else 'None'))\n if ref_user:\n _mlinfo(\"REFERENCE VALUE PROVIDED BY USER: '{:s}'={}\\n\"\n .format(phot_kwd, 
ref_phot))\n else:\n _mlinfo(\"REFERENCE VALUE FROM FILE: '{:s}[{:s}]'\\n\"\n .format(os.path.basename(f.image.original_fname),\n utils.ext2str(ref_ext)))\n _mlinfo(\"REFERENCE '{:s}' VALUE IS: {}\".format(phot_kwd, ref_phot))\n\n # equalize PHOTFLAM/PHOTNU\n for f in fl:\n # open the file if necessary:\n if f.fnamesOnly:\n _mlinfo(\"\\nProcessing file '{:s}'\".format(f.image))\n f.convert2ImageRef()\n else:\n _mlinfo(\"\\nProcessing file '{:s}'\".format(f.image.original_fname))\n\n # first, see if photflam is in the primary header and save this value:\n pri_conv = None\n if search_primary:\n whdu = f.image.hdu[0]\n if phot_kwd in whdu.header:\n _mlinfo(\" * Primary header:\")\n if whdu is ref_hdu:\n pri_conv = 1.0\n _mlinfo(\" - '{}' = {} found in the primary header.\"\n .format(phot_kwd, whdu.header[phot_kwd]))\n _mlinfo(\" - Data conversion factor based on primary \"\n \"header: {}\".format(pri_conv))\n else:\n _mlinfo(\" - '{}' found in the primary header.\"\n .format(phot_kwd))\n pri_conv = whdu.header[phot_kwd] / ref_phot\n _mlinfo(\" - Setting {:s} in the primary header to {} \"\n \"(old value was {})\"\n .format(phot_kwd, ref_phot, whdu.header[phot_kwd]))\n _mlinfo(\" - Data conversion factor based on primary \"\n \"header: {}\".format(pri_conv))\n whdu.header[phot_kwd] = ref_phot\n\n # correct the \"other\" photometric keyword, if present:\n if pri_conv is not None and whdu is not ref_hdu:\n for aux_kwd in aux_phot_kwd:\n if aux_kwd in whdu.header:\n old_aux_phot = whdu.header[aux_kwd]\n new_aux_phot = old_aux_phot / pri_conv\n whdu.header[aux_kwd] = new_aux_phot\n _mlinfo(\" - Setting {:s} in the primary header \"\n \"to {} (old value was {})\"\n .format(aux_kwd, new_aux_phot, old_aux_phot))\n\n # process data and error arrays when 'sciext' was specifically set to\n # 'PRIMARY':\n if sciext == 'PRIMARY' and pri_conv is not None:\n has_data = (hasattr(whdu, 'data') and\n whdu.data is not None)\n\n # correct data:\n if has_data:\n if 
np.issubdtype(whdu.data.dtype, np.floating):\n whdu.data *= pri_conv\n _mlinfo(\" - Data have been multiplied by {}\"\n .format(pri_conv))\n else:\n _mlwarn(\"WARNING: Data not converted because it is of \"\n \"non-floating point type.\")\n\n # correct error array:\n if errext is not None:\n eext = (errext, 1)\n try:\n whdu = f.image.hdu[eext]\n except KeyError:\n _mlwarn(\" - WARNING: Error extension {:s} not found.\"\n .format(utils.ext2str(eext)))\n\n f.release_all_images()\n continue\n\n if hasattr(whdu, 'data') and whdu.data is not None:\n if np.issubdtype(whdu.data.dtype, np.floating):\n whdu.data *= pri_conv\n _mlinfo(\" - Error array (ext={}) has been \"\n \"multiplied by {}\".format(eext, pri_conv))\n else:\n _mlinfo(\" - Error array in extension {:s} \"\n \"contains non-floating point data.\\n\"\n \" Skipping this extension\"\n .format(utils.ext2str(ext)))\n\n f.release_all_images()\n continue\n\n # find all valid extensions:\n extnum = utils.get_ext_list(f.image, sciext)\n\n for ext in extnum:\n whdu = f.image.hdu[ext]\n conv = None\n\n if whdu is ref_hdu:\n _mlinfo(\" * EXT: {} - This is the \\\"reference\\\" extension.\\n\"\n \" Nothing to do. Skipping this extension...\"\n .format(ext))\n continue\n\n has_data = (hasattr(whdu, 'data') and\n whdu.data is not None)\n\n if has_data and not np.issubdtype(whdu.data.dtype, np.floating):\n _mlinfo(\" * EXT: {} contains non-floating point data. 
\"\n \"Skipping this extension\".format(ext))\n\n # find all auxiliary photometric keywords present in the header:\n paux = [aux_kwd for aux_kwd in aux_phot_kwd if aux_kwd \\\n in whdu.header]\n\n if phot_kwd in whdu.header:\n _mlinfo(\" * EXT: {}\".format(ext))\n old_phot = whdu.header[phot_kwd]\n conv = old_phot / ref_phot\n _mlinfo(\" - Setting {:s} to {} (old value was {})\"\n .format(phot_kwd, ref_phot, old_phot))\n whdu.header[phot_kwd] = ref_phot\n _mlinfo(\" - Computed conversion factor for data: {}\"\n .format(conv))\n\n elif pri_conv is None:\n _mlinfo(\" * EXT: {}\".format(ext))\n _mlinfo(\" - '{:s} not found. Skipping this extension...\"\n .format(phot_kwd))\n continue\n\n else:\n _mlinfo(\" * EXT: {}\".format(ext))\n\n # if paux:\n # print(\"ERROR: Primary photometric keyword ('{:s}') is \"\n # \"missing but\\n the secondary keywords ('{:s}') \"\n # \"are present. This extension cannot be processed.\"\n # .format(phot_kwd, ','.join(paux)))\n # continue\n\n _mlinfo(\" - '{:s} not found. Using conversion factor \"\n \"based\\n on the primary header: {}\"\n .format(phot_kwd, pri_conv))\n conv = pri_conv\n\n # correct the \"other\" photometric keyword, if present:\n if conv is not None:\n for aux_kwd in paux:\n old_aux_phot = whdu.header[aux_kwd]\n new_aux_phot = old_aux_phot / conv\n whdu.header[aux_kwd] = new_aux_phot\n _mlinfo(\" - Setting {:s} to {} (old value was {})\"\n .format(aux_kwd, new_aux_phot, old_aux_phot))\n\n # correct data:\n if has_data:\n if conv is None:\n _mlinfo(\" * EXT: {}\".format(ext))\n\n if np.issubdtype(whdu.data.dtype, np.floating):\n whdu.data *= conv\n _mlinfo(\" - Data have been multiplied by {}\"\n .format(conv))\n else:\n _mlinfo(\"WARNING: Non-floating point data. 
Data cannot \"\n \"be re-scaled.\")\n\n # correct error array:\n if errext is not None and isinstance(ext, tuple) and len(ext) == 2:\n eext = (errext, ext[1])\n try:\n whdu = f.image.hdu[eext]\n except KeyError:\n continue\n\n if hasattr(whdu, 'data') and whdu.data is not None:\n if np.issubdtype(whdu.data.dtype, np.floating):\n whdu.data *= conv\n _mlinfo(\" - Error array (ext={}) has been \"\n \"multiplied by {}\".format(eext, conv))\n else:\n _mlinfo(\" - Error array in extension {:s} \"\n \"contains non-floating point data.\\n\"\n \" Skipping this extension\"\n .format(utils.ext2str(ext)))\n\n f.release_all_images()\n\n _mlinfo(\"\\nDone.\")\n\n if readonly:\n _mlinfo(\"\\nNOTE: '{:s}' was run in READONLY mode\\n\"\n \" and input image(s)' content WAS NOT MODIFIED.\"\n .format(__taskname__))\n\n # close all log file handlers:\n for h in _log.handlers:\n if h is not _sh_log and isinstance(h, logging.FileHandler):\n h.close()\n _log.removeHandler(h)", "response": "This function calculates the PHOTFLAM value of the chips in the current tree and stores it in the tree structure."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nmanage the PSET - like configobj parameters for a particular task.", "response": "def _managePsets(configobj, section_name, task_name, iparsobj=None, input_dict=None):\n \"\"\" Read in parameter values from PSET-like configobj tasks defined for\n source-finding algorithms, and any other PSET-like tasks under this task,\n and merge those values into the input configobj dictionary.\n \"\"\"\n # Merge all configobj instances into a single object\n configobj[section_name] = {}\n\n # Load the default full set of configuration parameters for the PSET:\n iparsobj_cfg = teal.load(task_name)\n\n # Identify optional parameters in input_dicts that are from this\n # PSET and add it to iparsobj:\n if input_dict is not None:\n for key in list(input_dict.keys()):\n if key in iparsobj_cfg:\n if iparsobj is not None and key in 
iparsobj:\n raise DuplicateKeyError(\"Duplicate parameter '{:s}' \"\n \"provided for task {:s}\".format(key, task_name))\n iparsobj_cfg[key] = input_dict[key]\n del input_dict[key]\n\n if iparsobj is not None:\n iparsobj_cfg.update(iparsobj)\n\n del iparsobj_cfg['_task_name_']\n\n # merge these parameters into full set\n configobj[section_name].merge(iparsobj_cfg)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nallows the user to edit the imagefindpars configObj in a TEAL GUI", "response": "def edit_imagefindpars():\n \"\"\" Allows the user to edit the imagefindpars configObj in a TEAL GUI\n \"\"\"\n teal.teal(imagefindpars.__taskname__, returnAs=None,\n autoClose=True, loadOnly=False, canExecute=False)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nallow the user to edit the refimagefindpars configObj in a TEAL GUI", "response": "def edit_refimagefindpars():\n \"\"\" Allows the user to edit the refimagefindpars configObj in a TEAL GUI\n \"\"\"\n teal.teal(refimagefindpars.__taskname__, returnAs=None,\n autoClose=True, loadOnly=False, canExecute=False)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nprint information about the specified local repository to STDOUT.", "response": "def print_rev_id(localRepoPath):\n \"\"\"prints information about the specified local repository to STDOUT. Expected method of execution: command-line or\n shell script call\n\n Parameters\n ----------\n localRepoPath: string\n Local repository path.\n\n Returns\n =======\n Nothing as such. 
subroutine will exit with a state of 0 if everything ran OK, and a value of '111' if\n something went wrong.\n \"\"\"\n start_path = os.getcwd()\n try:\n log.info(\"Local repository path: {}\".format(localRepoPath))\n os.chdir(localRepoPath)\n log.info(\"\\n== Remote URL\")\n os.system('git remote -v')\n\n # log.info(\"\\n== Remote Branches\")\n # os.system(\"git branch -r\")\n\n log.info(\"\\n== Local Branches\")\n os.system(\"git branch\")\n\n log.info(\"\\n== Most Recent Commit\")\n os.system(\"git log |head -1\")\n rv = 0\n except:\n rv = 111\n log.info(\"WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.\")\n finally:\n os.chdir(start_path)\n if rv != 0:\n sys.exit(rv)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_rev_id(localRepoPath):\n start_path = os.getcwd()\n try:\n os.chdir(localRepoPath)\n\n instream = os.popen(\"git --no-pager log --max-count=1 | head -1\")\n for streamline in instream.readlines():\n streamline = streamline.strip()\n if streamline.startswith(\"commit \"):\n rv = streamline.replace(\"commit \",\"\")\n else:\n raise\n except:\n rv = \"FAILURE: git revision info not found\"\n finally:\n os.chdir(start_path)\n\n return(rv)", "response": "returns the current full git revision id of the specified local repository"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update(input,refdir=\"jref$\",local=None,interactive=False,wcsupdate=True):\n print('UPDATENPOL Version',__version__+'('+__version_date__+')')\n # expand (as needed) the list of input files\n files,fcol = parseinput.parseinput(input)\n\n if not interactive:\n # expand reference directory name (if necessary) to\n # interpret IRAF or environment variable names\n rdir = fu.osfn(refdir)\n ngeofiles,ngcol = parseinput.parseinput(os.path.join(rdir,'*npl.fits'))\n # Find D2IMFILE in refdir for updating input file header as well\n 
d2ifiles,d2col = parseinput.parseinput(os.path.join(rdir,\"*d2i.fits\"))\n\n # Now, build a matched list of input files and DGEOFILE reference files\n # to use for selecting the appropriate new reference file from the\n # refdir directory.\n for f in files:\n print('Updating: ',f)\n fdir = os.path.split(f)[0]\n # Open each file...\n fimg = fits.open(f, mode='update', memmap=False)\n phdr = fimg['PRIMARY'].header\n fdet = phdr['detector']\n # get header of DGEOFILE\n dfile = phdr.get('DGEOFILE','')\n if dfile in ['N/A','',' ',None]:\n npolname = ''\n else:\n dhdr = fits.getheader(fu.osfn(dfile), memmap=False)\n if not interactive:\n # search all new NPOLFILEs for one that matches current DGEOFILE config\n npol = find_npolfile(ngeofiles,fdet,[phdr['filter1'],phdr['filter2']])\n else:\n if sys.version_info[0] >= 3:\n npol = input(\"Enter name of NPOLFILE for %s:\"%f)\n else:\n npol = raw_input(\"Enter name of NPOLFILE for %s:\"%f)\n if npol == \"\": npol = None\n\n if npol is None:\n errstr = \"No valid NPOLFILE found in \"+rdir+\" for detector=\"+fdet+\"\\n\"\n errstr += \" filters = \"+phdr['filter1']+\",\"+phdr['filter2']\n raise ValueError(errstr)\n\n npolname = os.path.split(npol)[1]\n if local:\n npolname = os.path.join(fdir,npolname)\n # clobber any previous copies of this reference file\n if os.path.exists(npolname): os.remove(npolname)\n shutil.copy(npol,npolname)\n else:\n if '$' in refdir:\n npolname = refdir+npolname\n else:\n npolname = os.path.join(refdir,npolname)\n phdr.set('NPOLFILE', value=npolname,\n comment=\"Non-polynomial corrections in Paper IV LUT\",\n after='DGEOFILE')\n\n # Now find correct D2IFILE\n if not interactive:\n d2i = find_d2ifile(d2ifiles,fdet)\n else:\n if sys.version_info[0] >= 3:\n d2i = input(\"Enter name of D2IMFILE for %s:\"%f)\n else:\n d2i = raw_input(\"Enter name of D2IMFILE for %s:\"%f)\n if d2i == \"\": d2i = None\n\n if d2i is None:\n print('=============\\nWARNING:')\n print(\" No valid D2IMFILE found in \"+rdir+\" for 
detector =\"+fdet)\n print(\" D2IMFILE correction will not be applied.\")\n print('=============\\n')\n d2iname = \"\"\n else:\n d2iname = os.path.split(d2i)[1]\n if local:\n # Copy D2IMFILE to local data directory alongside input file as well\n d2iname = os.path.join(fdir,d2iname)\n # clobber any previous copies of this reference file\n if os.path.exists(d2iname): os.remove(d2iname)\n shutil.copy(d2i,d2iname)\n else:\n if '$' in refdir:\n d2iname = refdir+d2iname\n else:\n d2iname = os.path.join(refdir,d2iname)\n\n phdr.set('D2IMFILE', value=d2iname,\n comment=\"Column correction table\",\n after='DGEOFILE')\n\n # Close this input file header and go on to the next\n fimg.close()\n\n if wcsupdate:\n updatewcs.updatewcs(f)", "response": "Updates the headers of the input file with the new reference files."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef find_d2ifile(flist,detector):\n d2ifile = None\n for f in flist:\n fdet = fits.getval(f, 'detector', memmap=False)\n if fdet == detector:\n d2ifile = f\n return d2ifile", "response": "Search a list of files for one that matches the detector specified."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef find_npolfile(flist,detector,filters):\n npolfile = None\n for f in flist:\n fdet = fits.getval(f, 'detector', memmap=False)\n if fdet == detector:\n filt1 = fits.getval(f, 'filter1', memmap=False)\n filt2 = fits.getval(f, 'filter2', memmap=False)\n fdate = fits.getval(f, 'date', memmap=False)\n if filt1 == 'ANY' or \\\n (filt1 == filters[0] and filt2 == filters[1]):\n npolfile = f\n return npolfile", "response": "Search a list of files for one that matches the configuration\n of detector and filters used."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nrun Teal interface for running this code.", "response": "def run(configobj=None,editpars=False):\n 
\"\"\" Teal interface for running this code.\n \"\"\"\n\n if configobj is None:\n configobj =teal.teal(__taskname__,loadOnly=(not editpars))\n\n update(configobj['input'],configobj['refdir'],\n local=configobj['local'],interactive=configobj['interactive'],\n wcsupdate=configobj['wcsupdate'])"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef reset_dq_bits(input,bits,extver=None,extname='dq'):\n # Interpret bits value\n bits = interpret_bit_flags(bits)\n\n flist, fcol = parseinput.parseinput(input)\n for filename in flist:\n # open input file in write mode to allow updating the DQ array in-place\n p = fits.open(filename, mode='update', memmap=False)\n\n # Identify the DQ array to be updated\n # If no extver is specified, build a list of all DQ arrays in the file\n if extver is None:\n extver = []\n for hdu in p:\n # find only those extensions which match the input extname\n # using case-insensitive name comparisons for 'extname'\n if 'extver' in hdu.header and \\\n hdu.header['extname'].lower() == extname.lower():\n extver.append(int(hdu.header['extver']))\n else:\n # Otherwise, insure that input extver values are a list\n if not isinstance(extver, list): extver = [extver]\n\n # for each DQ array identified in the file...\n for extn in extver:\n dqarr = p[extname,extn].data\n dqdtype = dqarr.dtype\n # reset the desired bits\n p[extname,extn].data = (dqarr & ~bits).astype(dqdtype) # preserve original dtype\n log.info('Reset bit values of %s to a value of 0 in %s[%s,%s]' %\n (bits, filename, extname, extn))\n # close the file with the updated DQ array(s)\n p.close()", "response": "This function resets the bits in the integer array of all the DQ arrays in a FITS file."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreplace pixels in input that have a value of pixvalue with a value given by newvalue.", "response": "def replace(input, **pars):\n \"\"\" 
Replace pixels in `input` that have a value of `pixvalue`\n with a value given by `newvalue`.\n\n\n \"\"\"\n pixvalue = pars.get('pixvalue', np.nan)\n if pixvalue is None: pixvalue = np.nan # insure that None == np.nan\n\n newvalue = pars.get('newvalue', 0.0)\n ext = pars.get('ext',None)\n if ext in ['',' ','None',None]:\n ext = None\n\n files = parseinput.parseinput(input)[0]\n\n for f in files:\n fimg = fits.open(f, mode='update', memmap=False)\n\n if ext is None:\n # replace pixels in ALL extensions\n extn = [i for i in fimg]\n else:\n if type(ext) == type([]):\n extn = [fimg[e] for e in ext]\n else:\n extn = [fimg[ext]]\n\n for e in extn:\n if e.data is not None and e.is_image: # ignore empty Primary HDUs\n print(\"Converting {}[{},{}] value of {} to {}\".format(\n f,e.name,e.ver,pixvalue,newvalue))\n if np.isnan(pixvalue):\n e.data[np.isnan(e.data)] = newvalue\n else:\n e.data[np.where(e.data == pixvalue)] = newvalue\n\n fimg.close()"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef tweakback(drzfile, input=None, origwcs = None,\n newname = None, wcsname = None,\n extname='SCI', force=False, verbose=False):\n \"\"\"\n Apply WCS solution recorded in drizzled file to distorted input images\n (``_flt.fits`` files) used to create the drizzled file. This task relies on\n the original WCS and updated WCS to be recorded in the drizzled image's\n header as the last 2 alternate WCSs.\n\n Parameters\n ----------\n drzfile : str (Default = '')\n filename of undistorted image which contains the new WCS\n and WCS prior to being updated\n\n newname : str (Default = None)\n Value of ``WCSNAME`` to be used to label the updated solution in the\n output (eq., ``_flt.fits``) files. If left blank or None, it will\n default to using the current ``WCSNAME`` value from the input drzfile.\n\n input : str (Default = '')\n filenames of distorted images to be updated using new WCS\n from 'drzfile'. 
These can be provided either as an ``@-file``,\n a comma-separated list of filenames or using wildcards.\n\n .. note:: A blank value will indicate that the task should derive the\n filenames from the 'drzfile' itself, if possible. The filenames will be\n derived from the ``D*DATA`` keywords written out by\n ``AstroDrizzle``. If they can not be found, the task will quit.\n\n origwcs : str (Default = None)\n Value of ``WCSNAME`` keyword prior to the drzfile image being updated\n by ``TweakReg``. If left blank or None, it will default to using the\n second to last ``WCSNAME*`` keyword value found in the header.\n\n wcsname : str (Default = None)\n Value of WCSNAME for updated solution written out by ``TweakReg`` as\n specified by the `wcsname` parameter from ``TweakReg``. If this is\n left blank or `None`, it will default to the current ``WCSNAME``\n value from the input drzfile.\n\n extname : str (Default = 'SCI')\n Name of extension in `input` files to be updated with new WCS\n\n force : bool (Default = False)\n This parameters specified whether or not to force an update of the WCS\n even though WCS already exists with this solution or `wcsname`?\n\n verbose : bool (Default = False)\n This parameter specifies whether or not to print out additional\n messages during processing.\n\n\n Notes\n -----\n The algorithm used by this function is based on linearization of\n the exact compound operator that converts input image coordinates\n to the coordinates (in the input image) that would result in\n alignment with the new drizzled image WCS.\n\n If no input distorted files are specified as input, this task will attempt\n to generate the list of filenames from the drizzled input file's own\n header.\n\n EXAMPLES\n --------\n An image named ``acswfc_mos2_drz.fits`` was created from 4 images using\n astrodrizzle. 
This drizzled image was then aligned to another image using\n tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``.\n The new WCS can then be used to update each of the 4 images that were\n combined to make up this drizzled image using:\n\n >>> from drizzlepac import tweakback\n >>> tweakback.tweakback('acswfc_mos2_drz.fits')\n\n If the same WCS should be applied to a specific set of images, those images\n can be updated using:\n\n >>> tweakback.tweakback('acswfc_mos2_drz.fits',\n ... input='img_mos2a_flt.fits,img_mos2e_flt.fits')\n\n See Also\n --------\n stwcs.wcsutil.altwcs: Alternate WCS implementation\n\n \"\"\"\n print(\"TweakBack Version {:s}({:s}) started at: {:s}\\n\"\n .format(__version__,__version_date__,util._ptime()[0]))\n\n # Interpret input list/string into list of filename(s)\n fltfiles = parseinput.parseinput(input)[0]\n\n if fltfiles is None or len(fltfiles) == 0:\n # try to extract the filenames from the drizzled file's header\n fltfiles = extract_input_filenames(drzfile)\n if fltfiles is None:\n print('*'*60)\n print('*')\n print('* ERROR:')\n print('* No input filenames found! ')\n print('* Please specify \"fltfiles\" or insure that input drizzled')\n print('* image contains D*DATA keywords. 
')\n print('*')\n print('*'*60)\n raise ValueError\n\n if not isinstance(fltfiles,list):\n fltfiles = [fltfiles]\n\n sciext = determine_extnum(drzfile, extname='SCI')\n scihdr = fits.getheader(drzfile, ext=sciext, memmap=False)\n\n ### Step 1: Read in updated and original WCS solutions\n # determine keys for all alternate WCS solutions in drizzled image header\n wkeys = wcsutil.altwcs.wcskeys(drzfile, ext=sciext)\n wnames = wcsutil.altwcs.wcsnames(drzfile, ext=sciext)\n if not util.is_blank(newname):\n final_name = newname\n else:\n final_name = wnames[wkeys[-1]]\n\n # Read in HSTWCS objects for final,updated WCS and previous WCS from\n # from drizzled image header\n # The final solution also serves as reference WCS when using updatehdr\n if not util.is_blank(wcsname):\n for k in wnames:\n if wnames[k] == wcsname:\n wcskey = k\n break\n else:\n wcskey = wkeys[-1]\n final_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=wkeys[-1])\n\n if not util.is_blank(origwcs):\n for k in wnames:\n if wnames[k] == origwcs:\n orig_wcskey = k\n orig_wcsname = origwcs\n break\n else:\n orig_wcsname,orig_wcskey = determine_orig_wcsname(scihdr,wnames,wkeys)\n\n orig_wcs = wcsutil.HSTWCS(drzfile,ext=sciext,wcskey=orig_wcskey)\n\n # read in RMS values reported for new solution\n crderr1kw = 'CRDER1'+wkeys[-1]\n crderr2kw = 'CRDER2'+wkeys[-1]\n\n if crderr1kw in scihdr:\n crderr1 = fits.getval(drzfile, crderr1kw, ext=sciext, memmap=False)\n else:\n crderr1 = 0.0\n\n if crderr2kw in scihdr:\n crderr2 = fits.getval(drzfile, crderr2kw, ext=sciext, memmap=False)\n else:\n crderr2 = 0.0\n del scihdr\n\n ### Step 2: Apply solution to input file headers\n for fname in fltfiles:\n logstr = \"....Updating header for {:s}...\".format(fname)\n if verbose:\n print(\"\\n{:s}\\n\".format(logstr))\n else:\n log.info(logstr)\n\n # reset header WCS keywords to original (OPUS generated) values\n imhdulist = fits.open(fname, mode='update', memmap=False)\n extlist = get_ext_list(imhdulist, extname='SCI')\n 
if not extlist:\n extlist = [0]\n\n # insure that input PRIMARY WCS has been archived before overwriting\n # with new solution\n wcsutil.altwcs.archiveWCS(imhdulist, extlist, reusekey=True)\n\n # Process MEF images...\n for ext in extlist:\n logstr = \"Processing {:s}[{:s}]\".format(imhdulist.filename(),\n ext2str(ext))\n if verbose:\n print(\"\\n{:s}\\n\".format(logstr))\n else:\n log.info(logstr)\n chip_wcs = wcsutil.HSTWCS(imhdulist, ext=ext)\n\n update_chip_wcs(chip_wcs, orig_wcs, final_wcs,\n xrms=crderr1, yrms = crderr2)\n\n # Update FITS file with newly updated WCS for this chip\n extnum = imhdulist.index(imhdulist[ext])\n updatehdr.update_wcs(imhdulist, extnum, chip_wcs,\n wcsname=final_name, reusename=False,\n verbose=verbose)\n\n imhdulist.close()", "response": "This function takes input drizzled file and returns a list of all the relevant attributes for the new WCS and original WCSs."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ngenerate a list of filenames from a drizzled image s header", "response": "def extract_input_filenames(drzfile):\n \"\"\"\n Generate a list of filenames from a drizzled image's header\n \"\"\"\n data_kws = fits.getval(drzfile, 'd*data', ext=0, memmap=False)\n if len(data_kws) == 0:\n return None\n fnames = []\n for kw in data_kws.cards:\n f = kw.value.split('[')[0]\n if f not in fnames:\n fnames.append(f)\n\n return fnames"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ndetermines the name of the original unmodified WCS solution", "response": "def determine_orig_wcsname(header, wnames, wkeys):\n \"\"\"\n Determine the name of the original, unmodified WCS solution\n \"\"\"\n orig_wcsname = None\n orig_key = None\n if orig_wcsname is None:\n for k,w in wnames.items():\n if w[:4] == 'IDC_':\n orig_wcsname = w\n orig_key = k\n break\n if orig_wcsname is None:\n # No IDC_ wcsname found... 
revert to second to last if available\n if len(wnames) > 1:\n orig_key = wkeys[-2]\n orig_wcsname = wnames[orig_key]\n return orig_wcsname,orig_key"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef parse_atfile_cat(input):\n with open(input[1:]) as f:\n catlist = []\n catdict = {}\n for line in f.readlines():\n if line[0] == '#' or not line.strip():\n continue\n\n lspl = line.split()\n if len(lspl) > 1:\n catdict[lspl[0]] = lspl[1:]\n catlist.append(lspl[1:])\n else:\n catdict[lspl[0]] = None\n catlist.append(None)\n\n return catlist, catdict", "response": "Parse the list of catalog filenames specified as part of the input at - file\n "} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nfunctioning to parse RA and Dec input values and turn them into decimal degrees", "response": "def parse_skypos(ra, dec):\n \"\"\"\n Function to parse RA and Dec input values and turn them into decimal\n degrees\n\n Input formats could be:\n [\"nn\",\"nn\",\"nn.nn\"]\n \"nn nn nn.nnn\"\n \"nn:nn:nn.nn\"\n \"nnH nnM nn.nnS\" or \"nnD nnM nn.nnS\"\n nn.nnnnnnnn\n \"nn.nnnnnnn\"\n\n \"\"\"\n rval = make_val_float(ra)\n dval = make_val_float(dec)\n if rval is None:\n rval, dval = radec_hmstodd(ra, dec)\n return rval, dval"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef radec_hmstodd(ra, dec):\n hmstrans = string.maketrans(string.ascii_letters,\n ' ' * len(string.ascii_letters))\n\n if isinstance(ra, list):\n rastr = ':'.join(ra)\n elif isinstance(ra, float):\n rastr = None\n pos_ra = ra\n elif ra.find(':') < 0:\n # convert any non-numeric characters to spaces\n # (we already know the units)\n rastr = ra.translate(hmstrans).strip()\n rastr = rastr.replace(' ', ' ')\n # convert 'nn nn nn.nn' to final 'nn:nn:nn.nn' string\n rastr = rastr.replace(' ', ':')\n else:\n rastr = ra\n\n if isinstance(dec, list):\n decstr = 
':'.join(dec)\n elif isinstance(dec, float):\n decstr = None\n pos_dec = dec\n elif dec.find(':') < 0:\n decstr = dec.translate(hmstrans).strip()\n decstr = decstr.replace(' ', ' ')\n decstr = decstr.replace(' ', ':')\n else:\n decstr = dec\n\n if rastr is None:\n pos = (pos_ra, pos_dec)\n else:\n pos_coord = coords.SkyCoord(rastr + ' ' + decstr,\n unit=(u.hourangle, u.deg))\n pos = (pos_coord.ra.deg, pos_coord.dec.deg)\n return pos", "response": "Function to convert HMS values into decimal degrees."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nread in exclusion definitions from file named by exclusions and return a list of positions and distances.", "response": "def parse_exclusions(exclusions):\n \"\"\" Read in exclusion definitions from file named by 'exclusions'\n and return a list of positions and distances\n \"\"\"\n fname = fileutil.osfn(exclusions)\n if os.path.exists(fname):\n with open(fname) as f:\n flines = f.readlines()\n else:\n print('No valid exclusions file \"', fname, '\" could be found!')\n print('Skipping application of exclusions files to source catalogs.')\n return None\n\n # Parse out lines which can be interpreted as positions and distances\n exclusion_list = []\n units = None\n for line in flines:\n if line[0] == '#' or 'global' in line[:6]:\n continue\n # Only interpret the part of the line prior to the comment\n # if a comment has been attached to the line\n if '#' in line:\n line = line.split('#')[0].rstrip()\n\n if units is None:\n units = 'pixels'\n if line[:3] in ['fk4', 'fk5', 'sky']:\n units = 'sky'\n if line[:5] in ['image', 'physi', 'pixel']:\n units = 'pixels'\n continue\n\n if 'circle(' in line:\n nline = line.replace('circle(', '')\n nline = nline.replace(')', '')\n nline = nline.replace('\"', '')\n vals = nline.split(',')\n if ':' in vals[0]:\n posval = vals[0] + ' ' + vals[1]\n else:\n posval = (float(vals[0]), float(vals[1]))\n else:\n # Try to interpret unformatted line\n if ',' in line:\n 
split_tok = ','\n else:\n split_tok = ' '\n vals = line.split(split_tok)\n if len(vals) == 3:\n if ':' in vals[0]:\n posval = vals[0] + ' ' + vals[1]\n else:\n posval = (float(vals[0]), float(vals[1]))\n else:\n continue\n exclusion_list.append(\n {'pos': posval, 'distance': float(vals[2]), 'units': units}\n )\n return exclusion_list"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nfunctioning which reads specified columns from either FITS or ASCII files and returns numpy array or arrays of columns", "response": "def readcols(infile, cols=None):\n \"\"\" Function which reads specified columns from either FITS tables or\n ASCII files\n\n This function reads in the columns specified by the user into numpy\n arrays regardless of the format of the input table (ASCII or FITS\n table).\n\n Parameters\n ----------\n infile : string\n Filename of the input file\n cols : string or list of strings\n Columns to be read into arrays\n\n Returns\n -------\n outarr : array\n Numpy array or arrays of columns from the table\n\n \"\"\"\n if _is_str_none(infile) is None:\n return None\n\n if infile.endswith('.fits'):\n outarr = read_FITS_cols(infile, cols=cols)\n else:\n outarr = read_ASCII_cols(infile, cols=cols)\n return outarr"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreads columns from FITS table", "response": "def read_FITS_cols(infile, cols=None): # noqa: N802\n \"\"\" Read columns from FITS table \"\"\"\n with fits.open(infile, memmap=False) as ftab:\n extnum = 0\n extfound = False\n for extn in ftab:\n if 'tfields' in extn.header:\n extfound = True\n break\n extnum += 1\n\n if not extfound:\n print('ERROR: No catalog table found in ', infile)\n raise ValueError\n\n # Now, read columns from the table in this extension if no column names\n # were provided by user, simply read in all columns from table\n if _is_str_none(cols[0]) is None:\n cols = ftab[extnum].data.names\n\n # Define the output\n 
outarr = [ftab[extnum].data.field(c) for c in cols]\n\n return outarr"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ninterpret input ASCII file to return arrays for specified columns.", "response": "def read_ASCII_cols(infile, cols=[1, 2, 3]): # noqa: N802\n \"\"\" Interpret input ASCII file to return arrays for specified columns.\n\n Notes\n -----\n The specification of the columns should be expected to have lists for\n each 'column', with all columns in each list combined into a single\n entry.\n\n For example::\n\n cols = ['1,2,3','4,5,6',7]\n\n where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec\n values and 7 represents the flux value for a total of 3 requested\n columns of data to be returned.\n\n Returns\n -------\n outarr : list of arrays\n The return value will be a list of numpy arrays, one for each\n 'column'.\n \"\"\"\n # build dictionary representing format of each row\n # Format of dictionary: {'colname':col_number,...}\n # This provides the mapping between column name and column number\n coldict = {}\n with open(infile, 'r') as f:\n flines = f.readlines()\n\n for l in flines: # interpret each line from catalog file\n if l[0].lstrip() == '#' or l.lstrip() == '':\n continue\n else:\n # convert first row of data into column definitions using indices\n coldict = {str(i + 1): i for i, _ in enumerate(l.split())}\n break\n\n numcols = len(cols)\n outarr = [[] for _ in range(numcols)]\n convert_radec = False\n\n # Now, map specified columns to columns in file and populate output arrays\n for l in flines: # interpret each line from catalog file\n l = l.strip()\n lspl = l.split()\n # skip blank lines, comment lines, or lines with\n # fewer columns than requested by user\n if not l or len(lspl) < numcols or l[0] == '#' or \"INDEF\" in l:\n continue\n\n # For each 'column' requested by user, pull data from row\n for c, i in zip(cols, list(range(numcols))):\n cnames = parse_colname(c)\n if len(cnames) > 1:\n # interpret 
multi-column specification as one value\n outval = ''\n for cn in cnames:\n cnum = coldict[cn]\n cval = lspl[cnum]\n outval += cval + ' '\n outarr[i].append(outval)\n convert_radec = True\n else:\n # pull single value from row for this column\n cnum = coldict[cnames[0]]\n if isfloat(lspl[cnum]):\n cval = float(lspl[cnum])\n else:\n cval = lspl[cnum]\n # Check for multi-column values given as \"nn:nn:nn.s\"\n if ':' in cval:\n cval = cval.replace(':', ' ')\n convert_radec = True\n outarr[i].append(cval)\n\n # convert multi-column RA/Dec specifications\n if convert_radec:\n outra = []\n outdec = []\n for ra, dec in zip(outarr[0], outarr[1]):\n radd, decdd = radec_hmstodd(ra, dec)\n outra.append(radd)\n outdec.append(decdd)\n outarr[0] = outra\n outarr[1] = outdec\n\n # convert all lists to numpy arrays\n for c in range(len(outarr)):\n outarr[c] = np.array(outarr[c])\n\n return outarr"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nwriting out a shiftfile for a given list of input Image class objects", "response": "def write_shiftfile(image_list, filename, outwcs='tweak_wcs.fits'):\n \"\"\" Write out a shiftfile for a given list of input Image class objects\n \"\"\"\n rows = ''\n nrows = 0\n for img in image_list:\n row = img.get_shiftfile_row()\n if row is not None:\n rows += row\n nrows += 1\n if nrows == 0: # If there are no fits to report, do not write out a file\n return\n\n # write out reference WCS now\n if os.path.exists(outwcs):\n os.remove(outwcs)\n p = fits.HDUList()\n p.append(fits.PrimaryHDU())\n p.append(createWcsHDU(image_list[0].refWCS))\n p.writeto(outwcs)\n\n # Write out shiftfile to go with reference WCS\n with open(filename, 'w') as f:\n f.write('# frame: output\\n')\n f.write('# refimage: %s[wcs]\\n' % outwcs)\n f.write('# form: delta\\n')\n f.write('# units: pixels\\n')\n f.write(rows)\n print('Writing out shiftfile :', filename)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 
3 code\ndef createWcsHDU(wcs): # noqa: N802\n header = wcs.to_header()\n\n header['EXTNAME'] = 'WCS'\n header['EXTVER'] = 1\n\n # Now, update original image size information\n header['NPIX1'] = (wcs.pixel_shape[0], \"Length of array axis 1\")\n header['NPIX2'] = (wcs.pixel_shape[1], \"Length of array axis 2\")\n header['PIXVALUE'] = (0.0, \"values of pixels in array\")\n\n if hasattr(wcs, 'orientat'):\n orientat = wcs.orientat\n else:\n # find orientat from CD or PC matrix\n if wcs.wcs.has_cd():\n cd12 = wcs.wcs.cd[0][1]\n cd22 = wcs.wcs.cd[1][1]\n elif wcs.wcs.has_pc():\n cd12 = wcs.wcs.cdelt[0] * wcs.wcs.pc[0][1]\n cd22 = wcs.wcs.cdelt[1] * wcs.wcs.pc[1][1]\n else:\n raise ValueError(\"Invalid WCS: WCS does not contain neither \"\n \"a CD nor a PC matrix.\")\n orientat = np.rad2deg(np.arctan2(cd12, cd22))\n header['ORIENTAT'] = (orientat, \"position angle of \"\n \"image y axis (deg. e of n)\")\n\n return fits.ImageHDU(None, header)", "response": "Create a WCS HDU that can be used to populate a reference WCS HDU."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngenerates the 2D Gaussian function for the given size nx and ny.", "response": "def gauss_array(nx, ny=None, fwhm=1.0, sigma_x=None, sigma_y=None,\n zero_norm=False):\n \"\"\" Computes the 2D Gaussian with size nx*ny.\n\n Parameters\n ----------\n nx : int\n ny : int [Default: None]\n Size of output array for the generated Gaussian. 
If ny == None,\n output will be an array nx X nx pixels.\n\n fwhm : float [Default: 1.0]\n Full-width, half-maximum of the Gaussian to be generated\n\n sigma_x : float [Default: None]\n sigma_y : float [Default: None]\n Sigma_x and sigma_y are the stddev of the Gaussian functions.\n\n zero_norm : bool [Default: False]\n The kernel will be normalized to a sum of 1 when True.\n\n Returns\n -------\n gauss_arr : array\n A numpy array with the generated gaussian function\n\n \"\"\"\n\n if ny is None:\n ny = nx\n\n if sigma_x is None:\n if fwhm is None:\n print('A value for either \"fwhm\" or \"sigma_x\" needs to be '\n 'specified!')\n raise ValueError\n else:\n # Convert input FWHM into sigma\n sigma_x = fwhm / (2 * np.sqrt(2 * np.log(2)))\n\n if sigma_y is None:\n sigma_y = sigma_x\n\n xradius = nx // 2\n yradius = ny // 2\n\n # Create grids of distance from center in X and Y\n xarr = np.abs(np.arange(-xradius, xradius + 1))\n yarr = np.abs(np.arange(-yradius, yradius + 1))\n hnx = gauss(xarr, sigma_x)\n hny = gauss(yarr, sigma_y)\n hny = hny.reshape((ny, 1))\n h = hnx * hny\n\n # Normalize gaussian kernel to a sum of 1\n h = h / np.abs(h).sum()\n if zero_norm:\n h -= h.mean()\n\n return h"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef gauss(x, sigma):\n return (np.exp(-np.power(x, 2) / (2 * np.power(sigma, 2))) /\n (sigma * np.sqrt(2 * np.pi)))", "response": "Compute 1 - D value of gaussian at position x relative to center."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef make_vector_plot(coordfile, columns=[1, 2, 3, 4], data=None,\n figure_id=None, title=None, axes=None, every=1,\n labelsize=8, ylimit=None, limit=None, xlower=None,\n ylower=None, output=None, headl=4, headw=3,\n xsh=0.0, ysh=0.0, fit=None, scale=1.0, vector=True,\n textscale=5, append=False, linfit=False, rms=True,\n plotname=None):\n \"\"\" Convert a XYXYMATCH file into a vector 
plot or set of residuals plots.\n\n This function provides a single interface for generating either a\n vector plot of residuals or a set of 4 plots showing residuals.\n The data being plotted can also be adjusted for a linear fit\n on-the-fly.\n\n Parameters\n ----------\n coordfile : string\n Name of file with matched sets of coordinates. This input file can\n be a file compatible for use with IRAF's geomap.\n columns : list [Default: [0,1,2,3]]\n Column numbers for the X,Y positions from each image\n data : list of arrays\n If specified, this can be used to input matched data directly\n title : string\n Title to be used for the generated plot\n axes : list\n List of X and Y min/max values to customize the plot axes\n every : int [Default: 1]\n Slice value for the data to be plotted\n limit : float\n Radial offset limit for selecting which sources are included in\n the plot\n labelsize : int [Default: 8] or str\n Font size to use for tick labels, either in font points or as a\n string understood by tick_params().\n ylimit : float\n Limit to use for Y range of plots.\n xlower : float\n ylower : float\n Limit in X and/or Y offset for selecting which sources are included\n in the plot\n output : string\n Filename of output file for generated plot\n headl : int [Default: 4]\n Length of arrow head to be used in vector plot\n headw : int [Default: 3]\n Width of arrow head to be used in vector plot\n xsh : float\n ysh : float\n Shift in X and Y from linear fit to be applied to source positions\n from the first image\n scale : float\n Scale from linear fit to be applied to source positions from the\n first image\n fit : array\n Array of linear coefficients for rotation (and scale?) in X and Y\n from a linear fit to be applied to source positions from the\n first image\n vector : bool [Default: True]\n Specifies whether or not to generate a vector plot. 
If False, task\n will generate a set of 4 residuals plots instead\n textscale : int [Default: 5]\n Scale factor for text used for labelling the generated plot\n append : bool [Default: False]\n If True, will overplot new plot on any pre-existing plot\n linfit : bool [Default: False]\n If True, a linear fit to the residuals will be generated and\n added to the generated residuals plots\n rms : bool [Default: True]\n Specifies whether or not to report the RMS of the residuals as a\n label on the generated plot(s).\n plotname : str [Default: None]\n Write out plot to a file with this name if specified.\n\n \"\"\"\n from matplotlib import pyplot as plt\n\n if data is None:\n data = readcols(coordfile, cols=columns)\n\n xy1x = data[0]\n xy1y = data[1]\n xy2x = data[2]\n xy2y = data[3]\n\n numpts = xy1x.shape[0]\n if fit is not None:\n xy1x, xy1y = apply_db_fit(data, fit, xsh=xsh, ysh=ysh)\n dx = xy2x - xy1x\n dy = xy2y - xy1y\n else:\n dx = xy2x - xy1x - xsh\n dy = xy2y - xy1y - ysh\n # apply scaling factor to deltas\n dx *= scale\n dy *= scale\n\n print('Total # points: {:d}'.format(len(dx)))\n if limit is not None:\n indx = np.sqrt(dx**2 + dy**2) <= limit\n dx = dx[indx].copy()\n dy = dy[indx].copy()\n xy1x = xy1x[indx].copy()\n xy1y = xy1y[indx].copy()\n\n if xlower is not None:\n xindx = np.abs(dx) >= xlower\n dx = dx[xindx].copy()\n dy = dy[xindx].copy()\n xy1x = xy1x[xindx].copy()\n xy1y = xy1y[xindx].copy()\n print('# of points after clipping: {:d}'.format(len(dx)))\n\n dr = np.sqrt(dx**2 + dy**2)\n max_vector = dr.max()\n\n if output is not None:\n write_xy_file(output, [xy1x, xy1y, dx, dy])\n\n fig = plt.figure(num=figure_id)\n if not append:\n plt.clf()\n\n if vector:\n dxs = imagestats.ImageStats(dx.astype(np.float32))\n dys = imagestats.ImageStats(dy.astype(np.float32))\n minx = xy1x.min()\n maxx = xy1x.max()\n miny = xy1y.min()\n maxy = xy1y.max()\n plt_xrange = maxx - minx\n plt_yrange = maxy - miny\n\n qplot = plt.quiver(xy1x[::every], xy1y[::every], 
dx[::every],\n dy[::every], units='y', headwidth=headw,\n headlength=headl)\n key_dx = 0.01 * plt_xrange\n key_dy = 0.005 * plt_yrange * textscale\n maxvec = max_vector / 2.\n key_len = round(maxvec + 0.005, 2)\n\n plt.xlabel('DX: %.4f to %.4f +/- %.4f' % (dxs.min, dxs.max,\n dxs.stddev))\n plt.ylabel('DY: %.4f to %.4f +/- %.4f' % (dys.min, dys.max,\n dys.stddev))\n plt.title(r\"$Vector\\ plot\\ of\\ %d/%d\\ residuals:\\ %s$\" %\n (xy1x.shape[0], numpts, title))\n plt.quiverkey(qplot, minx + key_dx, miny - key_dy, key_len,\n \"%0.2f pixels\" % (key_len),\n coordinates='data', labelpos='E', labelcolor='Maroon',\n color='Maroon')\n else:\n plot_defs = [[xy1x, dx, \"X (pixels)\", \"DX (pixels)\"],\n [xy1y, dx, \"Y (pixels)\", \"DX (pixels)\"],\n [xy1x, dy, \"X (pixels)\", \"DY (pixels)\"],\n [xy1y, dy, \"Y (pixels)\", \"DY (pixels)\"]]\n if axes is None:\n # Compute a global set of axis limits for all plots\n minx = min(xy1x.min(), xy1y.min())\n maxx = max(xy1x.max(), xy1y.max())\n miny = min(dx.min(), dy.min())\n maxy = max(dx.max(), dy.max())\n else:\n minx = axes[0][0]\n maxx = axes[0][1]\n miny = axes[1][0]\n maxy = axes[1][1]\n\n if ylimit is not None:\n miny = -ylimit\n maxy = ylimit\n\n rms_labelled = False\n if title is None:\n fig.suptitle(\"Residuals [%d/%d]\" % (xy1x.shape[0], numpts),\n ha='center', fontsize=labelsize + 6)\n else:\n # This definition of the title supports math symbols in the title\n fig.suptitle(r\"$\" + title + \"$\", ha='center',\n fontsize=labelsize + 6)\n\n for pnum, p in enumerate(plot_defs):\n pn = pnum + 1\n ax = fig.add_subplot(2, 2, pn)\n plt.plot(\n p[0], p[1], 'b.',\n label='RMS(X) = %.4f, RMS(Y) = %.4f' % (dx.std(), dy.std())\n )\n lx = [int((p[0].min() - 500) / 500) * 500,\n int((p[0].max() + 500) / 500) * 500]\n plt.plot(lx, [0.0, 0.0], 'k', linewidth=3)\n plt.axis([minx, maxx, miny, maxy])\n if rms and not rms_labelled:\n leg_handles, leg_labels = ax.get_legend_handles_labels()\n fig.legend(leg_handles, leg_labels, 
loc='center left',\n fontsize='small', frameon=False,\n bbox_to_anchor=(0.33, 0.51), borderaxespad=0)\n rms_labelled = True\n\n ax.tick_params(labelsize=labelsize)\n\n # Fine-tune figure; hide x ticks for top plots and y ticks for\n # right plots\n if pn <= 2:\n plt.setp(ax.get_xticklabels(), visible=False)\n else:\n ax.set_xlabel(plot_defs[pnum][2])\n\n if pn % 2 == 0:\n plt.setp(ax.get_yticklabels(), visible=False)\n else:\n ax.set_ylabel(plot_defs[pnum][3])\n\n if linfit:\n lxr = int((lx[-1] - lx[0]) / 100)\n lyr = int((p[1].max() - p[1].min()) / 100)\n a = np.vstack([p[0], np.ones(len(p[0]))]).T\n m, c = np.linalg.lstsq(a, p[1])[0]\n yr = [m * lx[0] + c, lx[-1] * m + c]\n plt.plot([lx[0], lx[-1]], yr, 'r')\n plt.text(\n lx[0] + lxr, p[1].max() + lyr,\n \"%0.5g*x + %0.5g [%0.5g,%0.5g]\" % (m, c, yr[0], yr[1]),\n color='r'\n )\n\n plt.draw()\n\n if plotname:\n suffix = plotname[-4:]\n if '.' not in suffix:\n output += '.png'\n format = 'png'\n else:\n if suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:\n format = suffix[1:]\n plt.savefig(plotname, format=format)", "response": "This function converts a XYXYMATCH file into a vector plot or set of 4 plots."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nfinding the center of the peak of offsets", "response": "def find_xy_peak(img, center=None, sigma=3.0):\n \"\"\" Find the center of the peak of offsets \"\"\"\n # find level of noise in histogram\n istats = imagestats.ImageStats(img.astype(np.float32), nclip=1,\n fields='stddev,mode,mean,max,min')\n if istats.stddev == 0.0:\n istats = imagestats.ImageStats(img.astype(np.float32),\n fields='stddev,mode,mean,max,min')\n imgsum = img.sum()\n\n # clip out all values below mean+3*sigma from histogram\n imgc = img[:, :].copy()\n imgc[imgc < istats.mode + istats.stddev * sigma] = 0.0\n # identify position of peak\n yp0, xp0 = np.where(imgc == imgc.max())\n\n # Perform bounds checking on slice from img\n ymin = max(0, int(yp0[0]) - 3)\n ymax 
= min(img.shape[0], int(yp0[0]) + 4)\n xmin = max(0, int(xp0[0]) - 3)\n xmax = min(img.shape[1], int(xp0[0]) + 4)\n # take sum of at most a 7x7 pixel box around peak\n xp_slice = (slice(ymin, ymax),\n slice(xmin, xmax))\n yp, xp = ndimage.measurements.center_of_mass(img[xp_slice])\n if np.isnan(xp) or np.isnan(yp):\n xp = 0.0\n yp = 0.0\n flux = 0.0\n zpqual = None\n else:\n xp += xp_slice[1].start\n yp += xp_slice[0].start\n\n # compute S/N criteria for this peak: flux/sqrt(mean of rest of array)\n flux = imgc[xp_slice].sum()\n delta_size = float(img.size - imgc[xp_slice].size)\n if delta_size == 0:\n delta_size = 1\n delta_flux = float(imgsum - flux)\n if flux > imgc[xp_slice].max():\n delta_flux = flux - imgc[xp_slice].max()\n else:\n delta_flux = flux\n zpqual = flux / np.sqrt(delta_flux / delta_size)\n if np.isnan(zpqual) or np.isinf(zpqual):\n zpqual = None\n\n if center is not None:\n xp -= center[0]\n yp -= center[1]\n flux = imgc[xp_slice].max()\n\n del imgc\n return xp, yp, flux, zpqual"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nplot a 2d histogram.", "response": "def plot_zeropoint(pars):\n \"\"\" Plot 2d histogram.\n\n Pars will be a dictionary containing:\n data, figure_id, vmax, title_str, xp,yp, searchrad\n \"\"\"\n from matplotlib import pyplot as plt\n\n xp = pars['xp']\n yp = pars['yp']\n searchrad = int(pars['searchrad'] + 0.5)\n\n plt.figure(num=pars['figure_id'])\n plt.clf()\n\n if pars['interactive']:\n plt.ion()\n else:\n plt.ioff()\n\n plt.imshow(pars['data'], vmin=0, vmax=pars['vmax'],\n interpolation='nearest')\n plt.viridis()\n plt.colorbar()\n plt.title(pars['title_str'])\n plt.plot(xp + searchrad, yp + searchrad, color='red', marker='+',\n markersize=24)\n plt.plot(searchrad, searchrad, color='yellow', marker='+', markersize=120)\n plt.text(searchrad, searchrad, \"Offset=0,0\", verticalalignment='bottom',\n color='yellow')\n plt.xlabel(\"Offset in X (pixels)\")\n plt.ylabel(\"Offset in Y (pixels)\")\n if 
pars['interactive']:\n plt.show()\n\n if pars['plotname']:\n suffix = pars['plotname'][-4:]\n output = pars['plotname']\n if '.' not in suffix:\n output += '.png'\n format = 'png'\n else:\n if suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:\n format = suffix[1:]\n plt.savefig(output, format=format)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nbuild ZP matrix for a single image and a single ref xy position.", "response": "def build_xy_zeropoint(imgxy, refxy, searchrad=3.0, histplot=False,\n figure_id=1, plotname=None, interactive=True):\n \"\"\" Create a matrix which contains the delta between each XY position and\n each UV position.\n \"\"\"\n print('Computing initial guess for X and Y shifts...')\n\n # run C function to create ZP matrix\n zpmat = cdriz.arrxyzero(imgxy.astype(np.float32), refxy.astype(np.float32),\n searchrad)\n\n xp, yp, flux, zpqual = find_xy_peak(zpmat, center=(searchrad, searchrad))\n if zpqual is not None:\n print('Found initial X and Y shifts of ', xp, yp)\n print(' with significance of ', zpqual, 'and ', flux, ' matches')\n else:\n # try with a lower sigma to detect a peak in a sparse set of sources\n xp, yp, flux, zpqual = find_xy_peak(\n zpmat, center=(searchrad, searchrad), sigma=1.0\n )\n if zpqual:\n print('Found initial X and Y shifts of ', xp, yp)\n print(' with significance of ', zpqual, 'and ',\n flux, ' matches')\n else:\n print('!' * 80)\n print('!')\n print('! WARNING: No valid shift found within a search radius of ',\n searchrad, ' pixels.')\n print('!')\n print('!' 
* 80)\n\n if histplot:\n zpstd = flux // 5\n if zpstd < 10:\n zpstd = 10\n if zpqual is None:\n zpstd = 10\n\n title_str = (\"Histogram of offsets: Peak has %d matches at \"\n \"(%0.4g, %0.4g)\" % (flux, xp, yp))\n\n plot_pars = {'data': zpmat, 'figure_id': figure_id, 'vmax': zpstd,\n 'xp': xp, 'yp': yp, 'searchrad': searchrad,\n 'title_str': title_str, 'plotname': plotname,\n 'interactive': interactive}\n\n plot_zeropoint(plot_pars)\n\n return xp, yp, flux, zpqual"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nbuilds a grid of positions starting at X Y given by start and ending at X Y given by end.", "response": "def build_pos_grid(start, end, nstep, mesh=False):\n \"\"\"\n Return a grid of positions starting at X,Y given by 'start', and ending\n at X,Y given by 'end'. The grid will be completely filled in X and Y by\n every 'step' interval.\n \"\"\"\n # Build X and Y arrays\n dx = end[0] - start[0]\n if dx < 0:\n nstart = end\n end = start\n start = nstart\n dx = -dx\n stepx = dx / nstep\n # Perform linear fit to find exact line that connects start and end\n xarr = np.arange(start[0], end[0] + stepx / 2.0, stepx)\n yarr = np.interp(xarr, [start[0], end[0]], [start[1], end[1]])\n\n # create grid of positions\n if mesh:\n xa, ya = np.meshgrid(xarr, yarr)\n xarr = xa.ravel()\n yarr = ya.ravel()\n\n return xarr, yarr"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef find_DQ_extension(self):\n dqfile = None\n # Look for additional file with DQ array, primarily for WFPC2 data\n\n indx = self._filename.find('.fits')\n\n if indx > 3:\n suffix = self._filename[indx-4:indx]\n dqfile = self._filename.replace(suffix[:3],'_c1')\n\n elif indx < 0 and len(self._filename) > 3 and \\\n self._filename[-4] == os.extsep and \\\n self._filename[-1].lower() == 'h':\n\n # assume we've got a GEIS file\n dqfile = self._filename[:-2]+'1'+self._filename[-1]\n hdulist 
= readgeis.readgeis(dqfile)\n prih = hdulist[0].header\n if 'FILETYPE' in prih:\n dq_suffix = prih['FILETYPE'].strip().upper()\n else:\n # assume extension name is 'SDQ' for WFPC2 GEIS files\n dq_suffix = 'SDQ'\n hdulist.close()\n return dqfile,dq_suffix\n\n else:\n raise ValueError(\"Input file {} does not appear to be neither \" \\\n \"a FITS file nor a GEIS file.\".format(self._filename))\n\n if os.path.exists(dqfile):\n dq_suffix = fits.getval(dqfile, \"EXTNAME\", ext=1, memmap=False)\n else:\n dq_suffix = \"SCI\"\n\n return dqfile, dq_suffix", "response": "Return the suffix for the data quality extension and the name of\n the file which that DQ extension should be read from."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef setInstrumentParameters(self, instrpars):\n pri_header = self._image[0].header\n self.proc_unit = instrpars['proc_unit']\n instrpars['gnkeyword'] = 'ATODGAIN' # hard-code for WFPC2 data\n instrpars['rnkeyword'] = None\n\n if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):\n instrpars['expkeyword'] = 'EXPTIME'\n\n for chip in self.returnAllChips(extname=self.scienceExt):\n chip._headergain = self.getInstrParameter(\n instrpars['gain'], pri_header, instrpars['gnkeyword']\n )\n chip._exptime = self.getInstrParameter(\n instrpars['exptime'], pri_header, instrpars['expkeyword']\n )\n # We need to treat Read Noise as a special case since it is\n # not populated in the WFPC2 primary header\n if instrpars['rnkeyword'] is None:\n chip._rdnoise = None\n else:\n chip._rdnoise = self.getInstrParameter(\n instrpars['rdnoise'], pri_header, instrpars['rnkeyword']\n )\n\n if chip._headergain is None or chip._exptime is None:\n print('ERROR: invalid instrument task parameter')\n raise ValueError\n\n # We need to determine if the user has used the default readnoise/gain value\n # since if not, they will need to supply a gain/readnoise value as well\n usingDefaultGain = 
instrpars['gnkeyword'] == 'ATODGAIN'\n usingDefaultReadnoise = instrpars['rnkeyword'] in [None, 'None']\n\n # If the user has specified either the readnoise or the gain, we need to make sure\n # that they have actually specified both values. In the default case, the readnoise\n # of the system depends on what the gain\n\n if usingDefaultReadnoise and usingDefaultGain:\n self._setchippars()\n elif usingDefaultReadnoise and not usingDefaultGain:\n raise ValueError(\"ERROR: You need to supply readnoise information\\n when not using the default gain for WFPC2.\")\n elif not usingDefaultReadnoise and usingDefaultGain:\n raise ValueError(\"ERROR: You need to supply gain information when\\n not using the default readnoise for WFPC2.\")\n else:\n # In this case, the user has specified both a gain and readnoise values. Just use them as is.\n for chip in self.returnAllChips(extname=self.scienceExt):\n chip._gain = chip._headergain\n print(\"Using user defined values for gain and readnoise\")\n\n # Convert the science data to electrons\n self.doUnitConversions()", "response": "This method is used to set the default values into the internal dictionary of parameters for the specified instrument task."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef doUnitConversions(self):\n # Image information\n _handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)\n\n # Now convert the SCI array(s) units\n for det in range(1,self._numchips+1):\n chip=self._image[self.scienceExt,det]\n conversionFactor = 1.0\n # add D2IMFILE to outputNames for removal by 'clean()' method later\n if 'D2IMFILE' in _handle[0].header and _handle[0].header['D2IMFILE'] not in [\"\",\"N/A\"]:\n chip.outputNames['d2imfile'] = _handle[0].header['D2IMFILE']\n\n if chip._gain is not None:\n \"\"\"\n # Multiply the values of the sci extension pixels by the gain.\n print \"Converting %s[%d] from COUNTS to 
ELECTRONS\"%(self._filename,det)\n\n # If the exptime is 0 the science image will be zeroed out.\n np.multiply(_handle[self.scienceExt,det].data,chip._gain,_handle[self.scienceExt,det].data)\n chip.data=_handle[self.scienceExt,det].data\n\n # Set the BUNIT keyword to 'electrons'\n chip._bunit = 'ELECTRONS'\n chip.header.update('BUNIT','ELECTRONS')\n _handle[self.scienceExt,det].header.update('BUNIT','ELECTRONS')\n\n # Update the PHOTFLAM value\n photflam = _handle[self.scienceExt,det].header['PHOTFLAM']\n _handle[self.scienceExt,det].header.update('PHOTFLAM',(photflam/chip._gain))\n \"\"\"\n conversionFactor = chip._gain\n chip._effGain = chip._gain #1.\n chip._conversionFactor = conversionFactor #1.\n\n else:\n msg = \"Invalid gain value for data, no conversion done\"\n print(msg)\n raise ValueError(msg)\n\n # Close the files and clean-up\n _handle.close()\n self._effGain = conversionFactor", "response": "Apply unit conversions to all the chips in the file."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getdarkcurrent(self,exten):\n darkrate = 0.005 # electrons / s\n if self.proc_unit == 'native':\n darkrate = darkrate / self.getGain(exten) #count/s\n\n try:\n chip = self._image[0]\n darkcurrent = chip.header['DARKTIME'] * darkrate\n\n except:\n msg = \"#############################################\\n\"\n msg += \"# #\\n\"\n msg += \"# Error: #\\n\"\n msg += \"# Cannot find the value for 'DARKTIME' #\\n\"\n msg += \"# in the image header. WFPC2 input #\\n\"\n msg += \"# images are expected to have this header #\\n\"\n msg += \"# keyword. 
#\\n\"\n msg += \"# #\\n\"\n msg += \"# Error occured in the WFPC2InputImage class#\\n\"\n msg += \"# #\\n\"\n msg += \"#############################################\\n\"\n raise ValueError(msg)\n\n return darkcurrent", "response": "Return the dark current for the WFPC2 detector."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef getReadNoise(self, exten):\n rn = self._image[exten]._rdnoise\n if self.proc_unit == 'native':\n rn = self._rdnoise / self.getGain(exten)\n return rn", "response": "Method for returning the read noise of a detector in units of counts."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nbuilding the mask file for the specified chip.", "response": "def buildMask(self, chip, bits=0, write=False):\n \"\"\" Build masks as specified in the user parameters found in the\n configObj object.\n \"\"\"\n sci_chip = self._image[self.scienceExt,chip]\n ### For WFPC2 Data, build mask files using:\n maskname = sci_chip.dqrootname+'_dqmask.fits'\n dqmask_name = buildmask.buildShadowMaskImage(sci_chip.dqfile,sci_chip.detnum,sci_chip.extnum,maskname,bitvalue=bits,binned=sci_chip.binned)\n sci_chip.dqmaskname = dqmask_name\n sci_chip.outputNames['dqmask'] = dqmask_name\n sci_chip.outputNames['tmpmask'] = 'wfpc2_inmask%d.fits'%(sci_chip.detnum)\n dqmask = fits.getdata(dqmask_name, ext=0, memmap=False)\n return dqmask"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef generateCatalog(wcs, mode='automatic', catalog=None,\n src_find_filters=None, **kwargs):\n \"\"\" Function which determines what type of catalog object needs to be\n instantiated based on what type of source selection algorithm the user\n specified.\n\n Parameters\n ----------\n wcs : obj\n WCS object generated by STWCS or PyWCS\n\n catalog : str or ndarray\n Filename of existing catalog or ndarray of image for generation of\n source catalog.\n\n kwargs : dict\n Parameters needed to 
interpret source catalog from input catalog\n with `findmode` being required.\n\n Returns\n -------\n catalog : obj\n A Catalog-based class instance for keeping track of WCS and\n associated source catalog\n\n \"\"\"\n if not isinstance(catalog,Catalog):\n if mode == 'automatic': # if an array is provided as the source\n # Create a new catalog directly from the image\n catalog = ImageCatalog(wcs,catalog,src_find_filters,**kwargs)\n else: # a catalog file was provided as the catalog source\n catalog = UserCatalog(wcs,catalog,**kwargs)\n\n return catalog", "response": "Function which determines what type of source catalog object needs to be instantiated based on what type of source selection algorithm the user has specified."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef generateRaDec(self):\n self.prefix = self.PAR_PREFIX\n\n if not isinstance(self.wcs,pywcs.WCS):\n print(\n textutil.textbox(\n 'WCS not a valid PyWCS object. 
'\n 'Conversion of RA/Dec not possible...'\n ),\n file=sys.stderr\n )\n raise ValueError\n\n if self.xypos is None or len(self.xypos[0]) == 0:\n self.xypos = None\n warnstr = textutil.textbox(\n 'WARNING: \\n'\n 'No objects found for this image...'\n )\n\n for line in warnstr.split('\\n'):\n log.warning(line)\n\n print(warnstr)\n return\n\n if self.radec is None:\n print(' Found {:d} objects.'.format(len(self.xypos[0])))\n if self.wcs is not None:\n ra, dec = self.wcs.all_pix2world(self.xypos[0], self.xypos[1], self.origin)\n self.radec = [ra, dec] + copy.deepcopy(self.xypos[2:])\n else:\n # If we have no WCS, simply pass along the XY input positions\n # under the assumption they were already sky positions.\n self.radec = copy.deepcopy(self.xypos)", "response": "Generate RA and DEC coordinates for a single object."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ntrimming sky catalog to remove any sources within exclusion regions specified by exclusion file.", "response": "def apply_exclusions(self,exclusions):\n \"\"\" Trim sky catalog to remove any sources within regions specified by\n exclusions file.\n \"\"\"\n # parse exclusion file into list of positions and distances\n exclusion_coords = tweakutils.parse_exclusions(exclusions)\n if exclusion_coords is None:\n return\n\n excluded_list = []\n radec_indx = list(range(len(self.radec[0])))\n for ra,dec,indx in zip(self.radec[0],self.radec[1],radec_indx):\n src_pos = coords.SkyCoord(ra=ra,dec=dec,unit=(u.hourangle,u.deg))\n\n # check to see whether this source is within an exclusion region\n for reg in exclusion_coords:\n if reg['units'] == 'sky':\n regpos = reg['pos']\n regdist = reg['distance'] # units: arcsec\n else:\n regradec = self.wcs.all_pix2world([reg['pos']],1)[0]\n regpos = (regradec[0],regradec[1])\n regdist = reg['distance']*self.wcs.pscale # units: arcsec\n\n epos = coords.SkyCoord(ra=regpos[0],dec=regpos[1],unit=(u.hourangle,u.deg))\n if 
float(epos.separation(src_pos).to_string(unit=u.arcsec,decimal=True)) <= regdist:\n excluded_list.append(indx)\n break\n # create a list of all 'good' sources outside all exclusion regions\n for e in excluded_list: radec_indx.remove(e)\n radec_indx = np.array(radec_indx,dtype=int)\n num_excluded = len(excluded_list)\n if num_excluded > 0:\n radec_trimmed = []\n xypos_trimmed = []\n for arr in self.radec:\n radec_trimmed.append(arr[radec_indx])\n for arr in self.xypos:\n xypos_trimmed.append(arr[radec_indx])\n xypos_trimmed[-1] = np.arange(len(xypos_trimmed[0]))\n self.radec = radec_trimmed\n self.xypos = xypos_trimmed\n log.info('Excluded %d sources from catalog.'%num_excluded)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef apply_flux_limits(self):\n if not self._apply_flux_limits:\n return\n\n # only if limits are set should they be applied\n if ((self.maxflux is None and self.minflux is None) or\n self.fluxunits is None):\n return\n\n print(\"\\n Applying flux limits...\")\n print(\" minflux = {}\".format(self.minflux))\n print(\" maxflux = {}\".format(self.maxflux))\n print(\" fluxunits = '{:s}'\".format(self.fluxunits))\n print(\" nbright = {}\".format(self.nbright))\n\n # start by checking to see whether fluxes were read in to use for\n # applying the limits\n if not self.flux_col:\n print(\" WARNING: Catalog did not contain fluxes for use in trimming...\")\n return\n\n if self.xypos is not None and self.radec is not None:\n if len(self.xypos) < len(self.radec):\n src_cat = self.radec\n else:\n src_cat = self.xypos\n else:\n src_cat = self.radec if self.xypos is None else self.xypos\n\n if src_cat is None:\n raise RuntimeError(\"No catalogs available for filtering\")\n\n if len(src_cat) < 3:\n print(\" WARNING: No fluxes read in for catalog for use in trimming...\")\n return\n\n fluxes = copy.deepcopy(src_cat[2])\n\n # apply limits equally to all .radec and .xypos entries\n # Start by clipping 
by any specified flux range\n if self.fluxunits == 'mag':\n if self.minflux is None:\n flux_mask = fluxes >= self.maxflux\n elif self.maxflux is None:\n flux_mask = fluxes <= self.minflux\n else:\n flux_mask = (fluxes <= self.minflux) & (fluxes >= self.maxflux)\n else:\n if self.minflux is None:\n flux_mask = fluxes <= self.maxflux\n elif self.maxflux is None:\n flux_mask = fluxes >= self.minflux\n else:\n flux_mask = (fluxes >= self.minflux) & (fluxes <= self.maxflux)\n\n if self.radec is None:\n all_radec = None\n else:\n all_radec = [rd[flux_mask].copy() for rd in self.radec]\n\n if self.xypos is None:\n all_xypos = None\n else:\n all_xypos = [xy[flux_mask].copy() for xy in self.xypos]\n\n nrem = flux_mask.size - np.count_nonzero(flux_mask)\n print(\" Removed {:d} sources based on flux limits.\".format(nrem))\n\n if self.nbright is not None:\n print(\"Selecting catalog based on {} brightest sources\".format(self.nbright))\n fluxes = fluxes[flux_mask]\n\n # find indices of brightest sources\n idx = np.argsort(fluxes)\n if self.fluxunits == 'mag':\n idx = idx[:self.nbright]\n else:\n idx = (idx[::-1])[:self.nbright]\n\n # pick out only the brightest 'nbright' sources\n if all_radec is not None:\n all_radec = [rd[idx] for rd in all_radec]\n if all_xypos is not None:\n all_xypos = [xy[idx] for xy in all_xypos]\n\n self.radec = all_radec\n self.xypos = all_xypos\n\n if len(self.radec[0]) == 0:\n print(\"Trimming of catalog resulted in NO valid sources! \")\n raise ValueError", "response": "Apply any user - specified limits on source selection\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nwrite out the X and Y source catalog to a file.", "response": "def writeXYCatalog(self,filename):\n \"\"\" Write out the X,Y catalog to a file\n \"\"\"\n if self.xypos is None:\n warnstr = textutil.textbox(\n 'WARNING: \\n No X,Y source catalog to write to file. 
')\n for line in warnstr.split('\\n'):\n log.warning(line)\n print(warnstr)\n return\n\n f = open(filename,'w')\n f.write(\"# Source catalog derived for %s\\n\"%self.wcs.filename)\n f.write(\"# Columns: \\n\")\n if self.use_sharp_round:\n f.write('# X Y Flux ID Sharp Round1 Round2\\n')\n else:\n f.write('# X Y Flux ID\\n')\n f.write('# (%s) (%s)\\n'%(self.in_units,self.in_units))\n\n for row in range(len(self.xypos[0])):\n for i in range(len(self.xypos)):\n f.write(\"%g \"%(self.xypos[i][row]))\n f.write(\"\\n\")\n\n f.close()"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ngenerating source catalog from input image using DAOFIND - style algorithm.", "response": "def generateXY(self, **kwargs):\n \"\"\" Generate source catalog from input image using DAOFIND-style algorithm\n \"\"\"\n #x,y,flux,sharp,round = idlphot.find(array,self.pars['hmin'],self.pars['fwhm'],\n # roundlim=self.pars['roundlim'], sharplim=self.pars['sharplim'])\n print(\" # Source finding for '{}', EXT={} started at: {}\"\n .format(self.fnamenoext, self.wcs.extname, util._ptime()[0]))\n if self.pars['computesig']:\n # compute sigma for this image\n sigma = self._compute_sigma()\n else:\n sigma = self.pars['skysigma']\n skymode = sigma**2\n log.info(' Finding sources using sky sigma = %f'%sigma)\n if self.pars['threshold'] in [None,\"INDEF\",\"\",\" \"]:\n hmin = skymode\n else:\n hmin = sigma*self.pars['threshold']\n\n if 'mask' in kwargs and kwargs['mask'] is not None:\n dqmask = np.asarray(kwargs['mask'], dtype=bool)\n else:\n dqmask = None\n\n # get the mask for source finding:\n mask = self._combine_exclude_mask(dqmask)\n\n x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind(\n self.source,\n hmin,\n self.pars['conv_width'],\n skymode,\n sharplim=[self.pars['sharplo'],self.pars['sharphi']],\n roundlim=[self.pars['roundlo'],self.pars['roundhi']],\n peakmin=self.pars['peakmin'],\n peakmax=self.pars['peakmax'],\n fluxmin=self.pars['fluxmin'],\n 
fluxmax=self.pars['fluxmax'],\n nsigma=self.pars['nsigma'],\n ratio=self.pars['ratio'],\n theta=self.pars['theta'],\n mask=mask,\n use_sharp_round=self.use_sharp_round,\n nbright=self.nbright\n )\n\n if len(x) == 0:\n if not self.pars['computesig']:\n sigma = self._compute_sigma()\n hmin = sigma * self.pars['threshold']\n log.info('No sources found with original thresholds. Trying automatic settings.')\n x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind(\n self.source,\n hmin,\n self.pars['conv_width'],\n skymode,\n sharplim=[self.pars['sharplo'],self.pars['sharphi']],\n roundlim=[self.pars['roundlo'],self.pars['roundhi']],\n peakmin=self.pars['peakmin'],\n peakmax=self.pars['peakmax'],\n fluxmin=self.pars['fluxmin'],\n fluxmax=self.pars['fluxmax'],\n nsigma=self.pars['nsigma'],\n ratio=self.pars['ratio'],\n theta=self.pars['theta'],\n mask = mask,\n use_sharp_round = self.use_sharp_round,\n nbright=self.nbright\n )\n if len(x) == 0:\n xypostypes = 3*[float]+[int]+(3 if self.use_sharp_round else 0)*[float]\n self.xypos = [np.empty(0, dtype=i) for i in xypostypes]\n warnstr = textutil.textbox('WARNING: \\n'+\n 'No valid sources found with the current parameter values!')\n for line in warnstr.split('\\n'):\n log.warning(line)\n print(warnstr)\n else:\n # convert the positions from numpy 0-based to FITS 1-based\n if self.use_sharp_round:\n self.xypos = [x+1, y+1, flux, src_id+self.start_id, sharp, round1, round2]\n else:\n self.xypos = [x+1, y+1, flux, src_id+self.start_id]\n\n log.info('###Source finding finished at: %s'%(util._ptime()[0]))\n\n self.in_units = 'pixels' # Not strictly necessary, but documents units when determined\n self.sharp = sharp\n self.round1 = round1\n self.round2 = round2\n self.numcols = 7 if self.use_sharp_round else 4\n self.num_objects = len(x)\n self._apply_flux_limits = False"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef generateXY(self, **kwargs):\n 
self.num_objects = 0\n xycols = self._readCatalog()\n\n if xycols is not None:\n # convert the catalog into attribute\n self.xypos = xycols[:3]\n # convert optional columns if they are present\n if self.numcols > 3:\n self.xypos.append(np.asarray(xycols[3], dtype=int)) # source ID\n if self.numcols > 4:\n self.sharp = xycols[4]\n if self.numcols > 5:\n self.round1 = xycols[5]\n if self.numcols > 6:\n self.round2 = xycols[6]\n self.num_objects = len(xycols[0])\n\n if self.numcols < 3: # account for flux column\n self.xypos.append(np.zeros(self.num_objects, dtype=float))\n self.flux_col = False\n\n if self.numcols < 4: # add source ID column\n self.xypos.append(np.arange(self.num_objects)+self.start_id)\n\n if self.use_sharp_round:\n for i in range(len(self.xypos), 7):\n self.xypos.append(np.zeros(self.num_objects, dtype=float))\n self.sharp_col = False\n\n if self.pars['xyunits'] == 'degrees':\n self.radec = [x.copy() for x in self.xypos]\n if self.wcs is not None:\n self.xypos[:2] = list(self.wcs.all_world2pix(np.array(self.xypos[:2]).T, self.origin).T)", "response": "Method to generate the xy column of the source and the source ID of the object."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nplotting the source catalog positions using matplotlib s plot function such as vmin vmax cmap marker marker", "response": "def plotXYCatalog(self, **kwargs):\n \"\"\"\n Plots the source catalog positions using matplotlib's `pyplot.plot()`\n\n Plotting `kwargs` that can also be passed include any keywords understood\n by matplotlib's `pyplot.plot()` function such as::\n\n vmin, vmax, cmap, marker\n\n \"\"\"\n try:\n from matplotlib import pyplot as pl\n except:\n pl = None\n\n if pl is not None:\n pl.clf()\n pl.plot(self.xypos[0],self.xypos[1],**kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef run(configObj,wcsmap=None):\n\n # Insure all output filenames specified have .fits 
extensions\n if configObj['outdata'][-5:] != '.fits': configObj['outdata'] += '.fits'\n\n scale_pars = configObj['Data Scaling Parameters']\n user_wcs_pars = configObj['User WCS Parameters']\n\n # PyFITS can be used here as it will always operate on\n # output from PyDrizzle (which will always be a FITS file)\n # Open the input (drizzled?) image\n _fname,_sciextn = fileutil.parseFilename(configObj['data'])\n _inimg = fileutil.openImage(_fname, memmap=False)\n _expin = fileutil.getKeyword(configObj['data'],scale_pars['expkey'],handle=_inimg)\n\n # Return the PyFITS HDU corresponding to the named extension\n _scihdu = fileutil.getExtn(_inimg,_sciextn)\n _insci = _scihdu.data.copy()\n\n _inexptime = 1.0\n if scale_pars['in_units'] == 'counts':\n if scale_pars['expkey'] in _inimg['PRIMARY'].header:\n _inexptime = _inimg['PRIMARY'].header[scale_pars['expkey']]\n elif 'DRIZEXPT' in _inimg['PRIMARY'].header:\n # Try keyword written out by new 'drizzle' if no valid 'expkey' was given\n _inexptime = _inimg['PRIMARY'].header['DRIZEXPT']\n else:\n raise ValueError('No valid exposure time keyword could be found '\n 'for input %s' % configObj['data'])\n # always convert input to 'cps' for blot() algorithm\n if _inexptime != 0.0 or _inexptime != 1.0:\n np.divide(_insci, _inexptime, _insci)\n\n _inimg.close()\n del _inimg\n\n # read in WCS from source (drizzled) image\n source_wcs = stwcs.wcsutil.HSTWCS(configObj['data'])\n if source_wcs.wcs.is_unity():\n print(\"WARNING: No valid WCS found for input drizzled image: {}!\".format(configObj['data']))\n\n # define blot_wcs\n blot_wcs = None\n _refname,_refextn = fileutil.parseFilename(configObj['reference'])\n if os.path.exists(_refname):\n # read in WCS from pre-existing output image\n blot_wcs = stwcs.wcsutil.HSTWCS(configObj['reference'])\n if blot_wcs.wcs.is_unity():\n print(\"WARNING: No valid WCS found for output image: {} !\".format(configObj['reference']))\n\n # define blot WCS based on input images or specified reference 
WCS values\n if user_wcs_pars['user_wcs']:\n blot_wcs = wcs_functions.build_hstwcs(\n user_wcs_pars['raref'], user_wcs_pars['decref'],\n user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],\n user_wcs_pars['outnx'], user_wcs_pars['outny'],\n user_wcs_pars['outscale'], user_wcs_pars['orient'] )\n configObj['coeffs'] = None\n\n # If blot_wcs is still not defined at this point, we have a problem...\n if blot_wcs is None:\n blot_wcs = stwcs.distortion.utils.output_wcs([source_wcs],undistort=False)\n\n out_wcs = blot_wcs.copy()\n # perform blotting operation now\n _outsci = do_blot(_insci, source_wcs, out_wcs, _expin, coeffs=configObj['coeffs'],\n interp=configObj['interpol'], sinscl=configObj['sinscl'],\n stepsize=configObj['stepsize'], wcsmap=wcsmap)\n # create output with proper units and exptime-scaling\n if scale_pars['out_units'] == 'counts':\n if scale_pars['expout'] == 'input':\n _outscale = fileutil.getKeyword(configObj['reference'],scale_pars['expkey'])\n #_outscale = _expin\n else:\n _outscale = float(scale_pars['expout'])\n print(\"Output blotted images scaled by exptime of {}\".format(_outscale))\n np.multiply(_outsci, _outscale, _outsci)\n\n # Add sky back in to the blotted image, as specified by the user\n if configObj['addsky']:\n skyval = _scihdu.header['MDRIZSKY']\n else:\n skyval = configObj['skyval']\n print(\"Added {} counts back in to blotted image as sky.\".format(skyval))\n _outsci += skyval\n\n del _scihdu\n\n # Write output Numpy objects to a PyFITS file\n # Blotting only occurs from a drizzled SCI extension\n # to a blotted SCI extension...\n outputimage.writeSingleFITS(_outsci,blot_wcs, configObj['outdata'],configObj['reference'])", "response": "Run the blot task based on parameters provided interactively by the user."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncores functionality of performing the 'blot' operation to create a single blotted image from a single source image. 
All distortion information is assumed to be included in the WCS specification of the 'output' blotted image given in 'blot_wcs'. This is the simplest interface that can be called for stand-alone use of the blotting function. Parameters ---------- source Input numpy array of undistorted source image in units of 'cps'. source_wcs HSTWCS object representing source image distortion-corrected WCS. blot_wcs (py)wcs.WCS object representing the blotted image WCS. exptime exptime to use for scaling output blot image. A value of 1 will result in output blot image in units of 'cps'. coeffs Flag to specify whether or not to use distortion coefficients associated with blot_wcs. If False, do not apply any distortion model. interp Form of interpolation to use when blotting pixels. Valid options:: \"nearest\",\"linear\",\"poly3\", \"poly5\"(default), \"spline3\", \"sinc\" sinscl Scale for sinc interpolation kernel (in output, blotted pixels) stepsize Number of pixels for WCS interpolation wcsmap Custom mapping class to use to provide transformation from drizzled to blotted WCS. Default will be to use `drizzlepac.wcs_functions.WCSMap`.", "response": "def do_blot(source, source_wcs, blot_wcs, exptime, coeffs = True,\n interp='poly5', sinscl=1.0, stepsize=10, wcsmap=None):\n \"\"\" Core functionality of performing the 'blot' operation to create a single\n blotted image from a single source image.\n All distortion information is assumed to be included in the WCS specification\n of the 'output' blotted image given in 'blot_wcs'.\n\n This is the simplest interface that can be called for stand-alone\n use of the blotting function.\n\n Parameters\n ----------\n source\n Input numpy array of undistorted source image in units of 'cps'.\n source_wcs\n HSTWCS object representing source image distortion-corrected WCS.\n blot_wcs\n (py)wcs.WCS object representing the blotted image WCS.\n exptime\n exptime to use for scaling output blot image. 
A value of 1 will\n result in output blot image in units of 'cps'.\n coeffs\n Flag to specify whether or not to use distortion coefficients\n associated with blot_wcs. If False, do not apply any distortion\n model.\n interp\n Form of interpolation to use when blotting pixels. Valid options::\n\n \"nearest\",\"linear\",\"poly3\", \"poly5\"(default), \"spline3\", \"sinc\"\n sinscl\n Scale for sinc interpolation kernel (in output, blotted pixels)\n stepsize\n Number of pixels for WCS interpolation\n wcsmap\n Custom mapping class to use to provide transformation from\n drizzled to blotted WCS. Default will be to use\n `drizzlepac.wcs_functions.WCSMap`.\n\n \"\"\"\n _outsci = np.zeros(blot_wcs.array_shape, dtype=np.float32)\n\n # Now pass numpy objects to callable version of Blot...\n build=False\n misval = 0.0\n kscale = 1.0\n\n xmin = 1\n ymin = 1\n xmax, ymax = source_wcs.pixel_shape\n\n # compute the undistorted 'natural' plate scale for this chip\n if coeffs:\n wcslin = distortion.utils.make_orthogonal_cd(blot_wcs)\n else:\n wcslin = blot_wcs\n blot_wcs.sip = None\n blot_wcs.cpdis1 = None\n blot_wcs.cpdis2 = None\n blot_wcs.det2im = None\n\n if wcsmap is None and cdriz is not None:\n \"\"\"\n Use default C mapping function.\n \"\"\"\n print('Using default C-based coordinate transformation...')\n mapping = cdriz.DefaultWCSMapping(\n blot_wcs, source_wcs,\n blot_wcs.pixel_shape[0], blot_wcs.pixel_shape[1],\n stepsize\n )\n pix_ratio = source_wcs.pscale/wcslin.pscale\n else:\n #\n ##Using the Python class for the WCS-based transformation\n #\n # Use user provided mapping function\n print('Using coordinate transformation defined by user...')\n if wcsmap is None:\n wcsmap = wcs_functions.WCSMap\n wmap = wcsmap(blot_wcs,source_wcs)\n mapping = wmap.forward\n pix_ratio = source_wcs.pscale/wcslin.pscale\n\n t = cdriz.tblot(\n source, _outsci,xmin,xmax,ymin,ymax,\n pix_ratio, kscale, 1.0, 1.0,\n 'center',interp, exptime,\n misval, sinscl, 1, mapping)\n del mapping\n\n 
return _outsci"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nprocesses the input file and return the ASNs of the current ASNs.", "response": "def process(inFile,force=False,newpath=None, inmemory=False, num_cores=None,\n headerlets=True, align_to_gaia=True):\n \"\"\" Run astrodrizzle on input file/ASN table\n using default values for astrodrizzle parameters.\n \"\"\"\n # We only need to import this package if a user run the task\n import drizzlepac\n from drizzlepac import processInput # used for creating new ASNs for _flc inputs\n from stwcs import updatewcs\n from drizzlepac import alignimages\n \n # interpret envvar variable, if specified\n if envvar_compute_name in os.environ:\n val = os.environ[envvar_compute_name].lower()\n if val not in envvar_bool_dict:\n msg = \"ERROR: invalid value for {}.\".format(envvar_compute_name)\n msg += \" \\n Valid Values: on, off, yes, no, true, false\"\n raise ValueError(msg) \n align_to_gaia = envvar_bool_dict[val]\n\n if envvar_new_apriori_name in os.environ:\n # Reset ASTROMETRY_STEP_CONTROL based on this variable\n # This provides backward-compatibility until ASTROMETRY_STEP_CONTROL\n # gets removed entirely.\n val = os.environ[envvar_new_apriori_name].lower()\n if val not in envvar_dict:\n msg = \"ERROR: invalid value for {}.\".format(envvar_new_apriori_name)\n msg += \" \\n Valid Values: on, off, yes, no, true, false\"\n raise ValueError(msg)\n\n os.environ[envvar_old_apriori_name] = envvar_dict[val]\n\n if headerlets or align_to_gaia:\n from stwcs.wcsutil import headerlet\n\n # Open the input file\n try:\n # Make sure given filename is complete and exists...\n inFilename = fileutil.buildRootname(inFile,ext=['.fits'])\n if not os.path.exists(inFilename):\n print(\"ERROR: Input file - %s - does not exist.\" % inFilename)\n return\n except TypeError:\n print(\"ERROR: Inappropriate input file.\")\n return\n\n #If newpath was specified, move all files to that directory for processing\n 
if newpath:\n orig_processing_dir = os.getcwd()\n new_processing_dir = _createWorkingDir(newpath,inFilename)\n _copyToNewWorkingDir(new_processing_dir,inFilename)\n os.chdir(new_processing_dir)\n\n # Initialize for later use...\n _mname = None\n _new_asn = None\n _calfiles = []\n\n # Identify WFPC2 inputs to account for differences in WFPC2 inputs\n wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'\n cal_ext = None\n\n # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM\n if '_asn' in inFilename:\n # We are working with an ASN table.\n # Use asnutil code to extract filename\n inFilename = _lowerAsn(inFilename)\n _new_asn = [inFilename]\n _asndict = asnutil.readASNTable(inFilename,None,prodonly=False)\n _cal_prodname = _asndict['output'].lower()\n #_fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])\n\n # Retrieve the first member's rootname for possible use later\n _fimg = fits.open(inFilename, memmap=False)\n for name in _fimg[1].data.field('MEMNAME'):\n if name[-1] != '*':\n _mname = name.split('\\0', 1)[0].lower()\n break\n _fimg.close()\n del _fimg\n\n else:\n # Check to see if input is a _RAW file\n # If it is, strip off the _raw.fits extension...\n _indx = inFilename.find('_raw')\n if _indx < 0: _indx = len(inFilename)\n # ... 
and build the CALXXX product rootname.\n if wfpc2_input:\n # force code to define _c0m file as calibrated product to be used\n cal_ext = ['_c0m.fits']\n _mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)\n\n _cal_prodname = inFilename[:_indx]\n # Reset inFilename to correspond to appropriate input for\n # drizzle: calibrated product name.\n inFilename = _mname\n\n if _mname is None:\n errorMsg = 'Could not find calibrated product!'\n raise Exception(errorMsg)\n\n # Create trailer filenames based on ASN output filename or\n # on input name for single exposures\n if '_raw' in inFile:\n # Output trailer file to RAW file's trailer\n _trlroot = inFile[:inFile.find('_raw')]\n elif '_asn' in inFile:\n # Output trailer file to ASN file's trailer, not product's trailer\n _trlroot = inFile[:inFile.find('_asn')]\n else:\n # Default: trim off last suffix of input filename\n # and replacing with .tra\n _indx = inFile.rfind('_')\n if _indx > 0:\n _trlroot = inFile[:_indx]\n else:\n _trlroot = inFile\n\n _trlfile = _trlroot + '.tra'\n\n # Open product and read keyword value\n # Check to see if product already exists...\n dkey = 'DRIZCORR'\n # ...if product does NOT exist, interrogate input file\n # to find out whether 'dcorr' has been set to PERFORM\n # Check if user wants to process again regardless of DRIZCORR keyword value\n if force:\n dcorr = 'PERFORM'\n else:\n if _mname :\n _fimg = fits.open(fileutil.buildRootname(_mname,ext=['_raw.fits']), memmap=False)\n _phdr = _fimg['PRIMARY'].header\n if dkey in _phdr:\n dcorr = _phdr[dkey]\n else:\n dcorr = None\n _fimg.close()\n del _fimg\n else:\n dcorr = None\n\n time_str = _getTime()\n _tmptrl = _trlroot + '_tmp.tra'\n _drizfile = _trlroot + '_pydriz'\n _drizlog = _drizfile + \".log\" # the '.log' gets added automatically by astrodrizzle\n _alignlog = _trlroot + '_align.log'\n if dcorr == 'PERFORM':\n if '_asn.fits' not in inFilename:\n # Working with a singleton\n # However, we always want to make sure we always 
use\n # a calibrated product as input, if available.\n _infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)\n _infile_flc = fileutil.buildRootname(_cal_prodname,ext=['_flc.fits'])\n\n _cal_prodname = _infile\n _inlist = _calfiles = [_infile]\n\n # Add CTE corrected filename as additional input if present\n if os.path.exists(_infile_flc) and _infile_flc != _infile:\n _inlist.append(_infile_flc)\n\n else:\n # Working with an ASN table...\n _infile = inFilename\n flist,duplist = processInput.checkForDuplicateInputs(_asndict['order'])\n _calfiles = flist\n if len(duplist) > 0:\n origasn = processInput.changeSuffixinASN(inFilename,'flt')\n dupasn = processInput.changeSuffixinASN(inFilename,'flc')\n _inlist = [origasn,dupasn]\n else:\n _inlist = [_infile]\n # We want to keep the original specification of the calibration\n # product name, though, not a lower-case version...\n _cal_prodname = inFilename\n _new_asn.extend(_inlist) # kept so we can delete it when finished\n\n # check to see whether FLC files are also present, and need to be updated\n # generate list of FLC files\n align_files = None\n _calfiles_flc = [f.replace('_flt.fits','_flc.fits') for f in _calfiles]\n # insure these files exist, if not, blank them out\n # Also pick out what files will be used for additional alignment to GAIA\n if not os.path.exists(_calfiles_flc[0]):\n _calfiles_flc = None\n align_files = _calfiles\n align_update_files = None\n else:\n align_files = _calfiles_flc\n align_update_files = _calfiles\n\n # Run updatewcs on each list of images\n updatewcs.updatewcs(_calfiles)\n if _calfiles_flc:\n updatewcs.updatewcs(_calfiles_flc)\n\n if align_to_gaia:\n # Perform additional alignment on the FLC files, if present\n ###############\n #\n # call hlapipeline code here on align_files list of files\n #\n ###############\n # Create trailer marker message for start of align_to_GAIA processing\n _trlmsg = _timestamp(\"Align_to_GAIA started \")\n print(_trlmsg)\n ftmp = open(_tmptrl,'w')\n 
ftmp.writelines(_trlmsg)\n ftmp.close()\n _appendTrlFile(_trlfile,_tmptrl)\n _trlmsg = \"\"\n\n # Create an empty astropy table so it can be used as input/output for the perform_align function\n #align_table = Table()\n try:\n align_table = alignimages.perform_align(align_files,update_hdr_wcs=True, runfile=_alignlog)\n for row in align_table:\n if row['status'] == 0:\n trlstr = \"Successfully aligned {} to {} astrometric frame\\n\"\n _trlmsg += trlstr.format(row['imageName'], row['catalog'])\n else:\n trlstr = \"Could not align {} to absolute astrometric frame\\n\"\n _trlmsg += trlstr.format(row['imageName'])\n\n except Exception:\n # Something went wrong with alignment to GAIA, so report this in\n # trailer file\n _trlmsg = \"EXCEPTION encountered in alignimages...\\n\"\n _trlmsg += \" No correction to absolute astrometric frame applied!\\n\"\n\n # Write the perform_align log to the trailer file...(this will delete the _alignlog)\n _appendTrlFile(_trlfile,_alignlog)\n\n # Append messages from this calling routine post-perform_align\n ftmp = open(_tmptrl,'w')\n ftmp.writelines(_trlmsg)\n ftmp.close()\n _appendTrlFile(_trlfile,_tmptrl)\n _trlmsg = \"\"\n\n #Check to see whether there are any additional input files that need to\n # be aligned (namely, FLT images)\n if align_update_files and align_table:\n # Apply headerlets from alignment to FLT version of the files\n for fltfile, flcfile in zip(align_update_files, align_files):\n row = align_table[align_table['imageName']==flcfile]\n headerletFile = row['headerletFile'][0]\n if headerletFile != \"None\":\n headerlet.apply_headerlet_as_primary(fltfile, headerletFile,\n attach=True, archive=True)\n # append log file contents to _trlmsg for inclusion in trailer file\n _trlstr = \"Applying headerlet {} as Primary WCS to {}\\n\"\n _trlmsg += _trlstr.format(headerletFile, fltfile)\n else:\n _trlmsg += \"No absolute astrometric headerlet applied to {}\\n\".format(fltfile)\n\n # Finally, append any further messages 
associated with alignement from this calling routine\n _trlmsg += _timestamp('Align_to_GAIA completed ')\n print(_trlmsg)\n ftmp = open(_tmptrl,'w')\n ftmp.writelines(_trlmsg)\n ftmp.close()\n _appendTrlFile(_trlfile,_tmptrl)\n\n # Run astrodrizzle and send its processing statements to _trlfile\n _pyver = drizzlepac.astrodrizzle.__version__\n\n for _infile in _inlist: # Run astrodrizzle for all inputs\n # Create trailer marker message for start of astrodrizzle processing\n _trlmsg = _timestamp('astrodrizzle started ')\n _trlmsg += __trlmarker__\n _trlmsg += '%s: Processing %s with astrodrizzle Version %s\\n' % (time_str,_infile,_pyver)\n print(_trlmsg)\n\n # Write out trailer comments to trailer file...\n ftmp = open(_tmptrl,'w')\n ftmp.writelines(_trlmsg)\n ftmp.close()\n _appendTrlFile(_trlfile,_tmptrl)\n\n _pyd_err = _trlroot+'_pydriz.stderr'\n\n try:\n b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,runfile=_drizfile,\n configobj='defaults',in_memory=inmemory,\n num_cores=num_cores, **pipeline_pars)\n except Exception as errorobj:\n _appendTrlFile(_trlfile,_drizlog)\n _appendTrlFile(_trlfile,_pyd_err)\n _ftrl = open(_trlfile,'a')\n _ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\\n' % _infile)\n _ftrl.write(str(sys.exc_info()[0])+': ')\n _ftrl.writelines(str(errorobj))\n _ftrl.write('\\n')\n _ftrl.close()\n print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)\n raise Exception(str(errorobj))\n\n # Now, append comments created by PyDrizzle to CALXXX trailer file\n print('Updating trailer file %s with astrodrizzle comments.' 
% _trlfile)\n _appendTrlFile(_trlfile,_drizlog)\n\n # Save this for when astropy.io.fits can modify a file 'in-place'\n # Update calibration switch\n _fimg = fits.open(_cal_prodname, mode='update', memmap=False)\n _fimg['PRIMARY'].header[dkey] = 'COMPLETE'\n _fimg.close()\n del _fimg\n\n # Enforce pipeline convention of all lower-case product\n # names\n _prodlist = glob.glob('*drz.fits')\n for _prodname in _prodlist:\n _plower = _prodname.lower()\n if _prodname != _plower: os.rename(_prodname,_plower)\n\n else:\n # Create default trailer file messages when astrodrizzle is not\n # run on a file. This will typically apply only to BIAS,DARK\n # and other reference images.\n # Start by building up the message...\n _trlmsg = _timestamp('astrodrizzle skipped ')\n _trlmsg = _trlmsg + __trlmarker__\n _trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\\n' % (time_str,inFilename)\n _trlmsg = _trlmsg + ' astrodrizzle will not be run at this time.\\n'\n print(_trlmsg)\n\n # Write message out to temp file and append it to full trailer file\n ftmp = open(_tmptrl,'w')\n ftmp.writelines(_trlmsg)\n ftmp.close()\n _appendTrlFile(_trlfile,_tmptrl)\n\n # Append final timestamp to trailer file...\n _final_msg = '%s: Finished processing %s \\n' % (time_str,inFilename)\n _final_msg += _timestamp('astrodrizzle completed ')\n _trlmsg += _final_msg\n ftmp = open(_tmptrl,'w')\n ftmp.writelines(_trlmsg)\n ftmp.close()\n _appendTrlFile(_trlfile,_tmptrl)\n\n # If we created a new ASN table, we need to remove it\n if _new_asn is not None:\n for _name in _new_asn: fileutil.removeFile(_name)\n\n # Clean up any generated OrIg_files directory\n if os.path.exists(\"OrIg_files\"):\n # check to see whether this directory is empty\n flist = glob.glob('OrIg_files/*.fits')\n if len(flist) == 0:\n os.rmdir(\"OrIg_files\")\n else:\n print('OrIg_files directory NOT removed as it still contained images...')\n\n # If headerlets have already been written out by alignment code,\n # do NOT 
write out this version of the headerlets\n if headerlets:\n # Generate headerlets for each updated FLT image\n hlet_msg = _timestamp(\"Writing Headerlets started\")\n for fname in _calfiles:\n frootname = fileutil.buildNewRootname(fname)\n hname = \"%s_flt_hlet.fits\"%frootname\n # Write out headerlet file used by astrodrizzle, however,\n # do not overwrite any that was already written out by alignimages\n if not os.path.exists(hname):\n hlet_msg += \"Created Headerlet file %s \\n\"%hname\n try:\n headerlet.write_headerlet(fname,'OPUS',output='flt', wcskey='PRIMARY',\n author=\"OPUS\",descrip=\"Default WCS from Pipeline Calibration\",\n attach=False,clobber=True,logging=False)\n except ValueError:\n hlet_msg += _timestamp(\"SKIPPED: Headerlet not created for %s \\n\"%fname)\n # update trailer file to log creation of headerlet files\n hlet_msg += _timestamp(\"Writing Headerlets completed\")\n ftrl = open(_trlfile,'a')\n ftrl.write(hlet_msg)\n ftrl.close()\n\n # If processing was done in a temp working dir, restore results to original\n # processing directory, return to original working dir and remove temp dir\n if newpath:\n _restoreResults(new_processing_dir,orig_processing_dir)\n os.chdir(orig_processing_dir)\n _removeWorkingDir(new_processing_dir)\n\n # Provide feedback to user\n print(_final_msg)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _lowerAsn(asnfile):\n # Start by creating a new name for the ASN table\n _indx = asnfile.find('_asn.fits')\n _new_asn = asnfile[:_indx]+'_pipeline'+asnfile[_indx:]\n if os.path.exists(_new_asn):\n os.remove(_new_asn)\n # copy original ASN table to new table\n shutil.copy(asnfile,_new_asn)\n\n # Open up the new copy and convert all MEMNAME's to lower-case\n fasn = fits.open(_new_asn, mode='update', memmap=False)\n for i in range(len(fasn[1].data)):\n fasn[1].data[i].setfield('MEMNAME',fasn[1].data[i].field('MEMNAME').lower())\n fasn.close()\n\n return 
_new_asn", "response": "Create a copy of the original asn file and change the case of all members to lower - case."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nappend drizfile to trlfile from CALXXX.", "response": "def _appendTrlFile(trlfile,drizfile):\n \"\"\" Append drizfile to already existing trlfile from CALXXX.\n \"\"\"\n if not os.path.exists(drizfile):\n return\n # Open already existing CALWF3 trailer file for appending\n ftrl = open(trlfile,'a')\n # Open astrodrizzle trailer file\n fdriz = open(drizfile)\n\n # Read in drizzle comments\n _dlines = fdriz.readlines()\n\n # Append them to CALWF3 trailer file\n ftrl.writelines(_dlines)\n\n # Close all files\n ftrl.close()\n fdriz.close()\n\n # Now, clean up astrodrizzle trailer file\n os.remove(drizfile)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _timestamp(_process_name):\n _prefix= time.strftime(\"%Y%j%H%M%S-I-----\",time.localtime())\n _lenstr = 60 - len(_process_name)\n return _prefix+_process_name+(_lenstr*'-')+'\\n'", "response": "Create formatted time string recognizable by OPUS."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncreating a working directory based on input name under the parent directory specified as rootdir", "response": "def _createWorkingDir(rootdir,input):\n \"\"\"\n Create a working directory based on input name under the parent directory specified as rootdir\n \"\"\"\n # extract rootname from input\n rootname = input[:input.find('_')]\n newdir = os.path.join(rootdir,rootname)\n if not os.path.exists(newdir):\n os.mkdir(newdir)\n return newdir"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncopy input file and all related files needed for processing to the new working directory.", "response": "def _copyToNewWorkingDir(newdir,input):\n \"\"\" Copy input file and all related files necessary for processing to the new working 
directory.\n\n This function works in a greedy manner, in that all files associated\n with all inputs(have the same rootname) will be copied to the new\n working directory.\n \"\"\"\n flist = []\n if '_asn.fits' in input:\n asndict = asnutil.readASNTable(input,None)\n flist.append(input[:input.find('_')])\n flist.extend(asndict['order'])\n flist.append(asndict['output'])\n else:\n flist.append(input[:input.find('_')])\n # copy all files related to these rootnames into new dir\n for rootname in flist:\n for fname in glob.glob(rootname+'*'):\n shutil.copy(fname,os.path.join(newdir,fname))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _restoreResults(newdir,origdir):\n for fname in glob.glob(os.path.join(newdir,'*')):\n shutil.move(fname,os.path.join(origdir,os.path.basename(fname)))", "response": "Move all files from newdir back to the original directory\n "} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates a median image from the seperately drizzled images.", "response": "def median(input=None, configObj=None, editpars=False, **inputDict):\n \"\"\"\n Create a median image from the seperately drizzled images.\n \"\"\"\n if input is not None:\n inputDict[\"input\"] = input\n else:\n raise ValueError(\"Please supply an input image\")\n\n configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,\n loadOnly=(not editpars))\n if configObj is None:\n return\n\n if not editpars:\n run(configObj)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef createMedian(imgObjList, configObj, procSteps=None):\n if imgObjList is None:\n msg = \"Please provide a list of imageObjects to the median step\"\n print(msg, file=sys.stderr)\n raise ValueError(msg)\n\n if procSteps is not None:\n procSteps.addStep('Create Median')\n\n step_name = util.getSectionName(configObj, 
_step_num_)\n if not configObj[step_name]['median']:\n log.info('Median combination step not performed.')\n return\n\n paramDict = configObj[step_name]\n paramDict['proc_unit'] = configObj['proc_unit']\n\n # include whether or not compression was performed\n driz_sep_name = util.getSectionName(configObj, _single_step_num_)\n driz_sep_paramDict = configObj[driz_sep_name]\n paramDict['compress'] = driz_sep_paramDict['driz_sep_compress']\n\n log.info('USER INPUT PARAMETERS for Create Median Step:')\n util.printParams(paramDict, log=log)\n\n _median(imgObjList, paramDict)\n\n if procSteps is not None:\n procSteps.endStep('Create Median')", "response": "This function creates a new top - level node with the median - combine function."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a single image object from the list of image Objects that has been given.", "response": "def _median(imageObjectList, paramDict):\n \"\"\"Create a median image from the list of image Objects\n that has been given.\n \"\"\"\n newmasks = paramDict['median_newmasks']\n comb_type = paramDict['combine_type'].lower()\n nlow = paramDict['combine_nlow']\n nhigh = paramDict['combine_nhigh']\n grow = paramDict['combine_grow'] if 'minmed' in comb_type else 0\n maskpt = paramDict['combine_maskpt']\n proc_units = paramDict['proc_unit']\n compress = paramDict['compress']\n bufsizeMB = paramDict['combine_bufsize']\n\n sigma = paramDict[\"combine_nsigma\"]\n sigmaSplit = sigma.split()\n nsigma1 = float(sigmaSplit[0])\n nsigma2 = float(sigmaSplit[1])\n\n if paramDict['combine_lthresh'] is None:\n lthresh = None\n else:\n lthresh = float(paramDict['combine_lthresh'])\n\n if paramDict['combine_hthresh'] is None:\n hthresh = None\n else:\n hthresh = float(paramDict['combine_hthresh'])\n\n # the name of the output median file isdefined in the output wcs object and\n # stuck in the image.outputValues[\"outMedian\"] dict of every imageObject\n medianfile = 
imageObjectList[0].outputNames[\"outMedian\"]\n\n # Build combined array from single drizzled images.\n\n # Start by removing any previous products...\n if os.access(medianfile, os.F_OK):\n os.remove(medianfile)\n\n # Define lists for instrument specific parameters, these should be in\n # the image objects need to be passed to the minmed routine\n readnoiseList = []\n exposureTimeList = []\n backgroundValueList = [] # list of MDRIZSKY *platescale values\n singleDrizList = [] # these are the input images\n singleWeightList = [] # pointers to the data arrays\n wht_mean = [] # Compute the mean value of each wht image\n\n single_hdr = None\n virtual = None\n\n # for each image object\n for image in imageObjectList:\n if virtual is None:\n virtual = image.inmemory\n\n det_gain = image.getGain(1)\n img_exptime = image._image['sci', 1]._exptime\n native_units = image.native_units\n native_units_lc = native_units.lower()\n\n if proc_units.lower() == 'native':\n if native_units_lc not in ['counts', 'electrons', 'counts/s',\n 'electrons/s']:\n raise ValueError(\"Unexpected native units: '{}'\"\n .format(native_units))\n\n if lthresh is not None:\n if native_units_lc.startswith('counts'):\n lthresh *= det_gain\n if native_units_lc.endswith('/s'):\n lthresh *= img_exptime\n\n if hthresh is not None:\n if native_units_lc.startswith('counts'):\n hthresh *= det_gain\n if native_units_lc.endswith('/s'):\n hthresh *= img_exptime\n\n singleDriz = image.getOutputName(\"outSingle\")\n singleDriz_name = image.outputNames['outSingle']\n singleWeight = image.getOutputName(\"outSWeight\")\n singleWeight_name = image.outputNames['outSWeight']\n\n # If compression was used, reference ext=1 as CompImageHDU only writes\n # out MEF files, not simple FITS.\n if compress:\n wcs_ext = '[1]'\n wcs_extnum = 1\n else:\n wcs_ext = '[0]'\n wcs_extnum = 0\n\n if not virtual:\n if isinstance(singleDriz, str):\n iter_singleDriz = singleDriz + wcs_ext\n iter_singleWeight = singleWeight + wcs_ext\n else:\n 
iter_singleDriz = singleDriz[wcs_extnum]\n iter_singleWeight = singleWeight[wcs_extnum]\n else:\n iter_singleDriz = singleDriz_name + wcs_ext\n iter_singleWeight = singleWeight_name + wcs_ext\n\n # read in WCS from first single drizzle image to use as WCS for\n # median image\n if single_hdr is None:\n if virtual:\n single_hdr = singleDriz[wcs_extnum].header\n else:\n single_hdr = fits.getheader(singleDriz_name, ext=wcs_extnum,\n memmap=False)\n\n single_image = iterfile.IterFitsFile(iter_singleDriz)\n if virtual:\n single_image.handle = singleDriz\n single_image.inmemory = True\n\n singleDrizList.append(single_image) # add to an array for bookkeeping\n\n # If it exists, extract the corresponding weight images\n if (not virtual and os.access(singleWeight, os.F_OK)) or (\n virtual and singleWeight):\n weight_file = iterfile.IterFitsFile(iter_singleWeight)\n if virtual:\n weight_file.handle = singleWeight\n weight_file.inmemory = True\n\n singleWeightList.append(weight_file)\n try:\n tmp_mean_value = ImageStats(weight_file.data, lower=1e-8,\n fields=\"mean\", nclip=0).mean\n except ValueError:\n tmp_mean_value = 0.0\n wht_mean.append(tmp_mean_value * maskpt)\n\n # Extract instrument specific parameters and place in lists\n\n # If an image has zero exposure time we will\n # redefine that value as '1'. Although this will cause inaccurate\n # scaling of the data to occur in the 'minmed' combination\n # algorith, this is a necessary evil since it avoids divide by\n # zero exceptions. 
It is more important that the divide by zero\n # exceptions not cause AstroDrizzle to crash in the pipeline than\n # it is to raise an exception for this obviously bad data even\n # though this is not the type of data you would wish to process\n # with AstroDrizzle.\n #\n # Get the exposure time from the InputImage object\n #\n # MRD 19-May-2011\n # Changed exposureTimeList to take exposure time from img_exptime\n # variable instead of hte image._exptime attribute, since\n # image._exptime was just giving 1.\n #\n exposureTimeList.append(img_exptime)\n\n # Use only \"commanded\" chips to extract subtractedSky and rdnoise:\n rdnoise = 0.0\n nchips = 0\n bsky = None # minimum sky across **used** chips\n\n for chip in image.returnAllChips(extname=image.scienceExt):\n # compute sky value as sky/pixel using the single_drz\n # pixel scale:\n if bsky is None or bsky > chip.subtractedSky:\n bsky = chip.subtractedSky * chip._conversionFactor\n\n # Extract the readnoise value for the chip\n rdnoise += chip._rdnoise**2\n nchips += 1\n\n if bsky is None:\n bsky = 0.0\n\n if nchips > 0:\n rdnoise = math.sqrt(rdnoise/nchips)\n\n backgroundValueList.append(bsky)\n readnoiseList.append(rdnoise)\n\n print(\"reference sky value for image '{}' is {}\"\n .format(image._filename, backgroundValueList[-1]))\n #\n # END Loop over input image list\n #\n\n # create an array for the median output image, use the size of the first\n # image in the list. 
Store other useful image characteristics:\n single_driz_data = singleDrizList[0].data\n data_item_size = single_driz_data.itemsize\n single_data_dtype = single_driz_data.dtype\n imrows, imcols = single_driz_data.shape\n\n medianImageArray = np.zeros_like(single_driz_data)\n\n del single_driz_data\n\n if comb_type == \"minmed\" and not newmasks:\n # Issue a warning if minmed is being run with newmasks turned off.\n print('\\nWARNING: Creating median image without the application of '\n 'bad pixel masks!\\n')\n\n # The overlap value needs to be set to 2*grow in order to\n # avoid edge effects when scrolling down the image, and to\n # insure that the last section returned from the iterator\n # has enough rows to span the kernel used in the boxcar method\n # within minmed.\n overlap = 2 * grow\n buffsize = BUFSIZE if bufsizeMB is None else (BUFSIZE * bufsizeMB)\n section_nrows = min(imrows, int(buffsize / (imcols * data_item_size)))\n\n if section_nrows == 0:\n buffsize = imcols * data_item_size\n print(\"WARNING: Buffer size is too small to hold a single row.\\n\"\n \" Buffer size size will be increased to minimal \"\n \"required: {}MB\".format(float(buffsize) / 1048576.0))\n section_nrows = 1\n\n if section_nrows < overlap + 1:\n new_grow = int((section_nrows - 1) / 2)\n if section_nrows == imrows:\n print(\"'grow' parameter is too large for actual image size. \"\n \"Reducing 'grow' to {}\".format(new_grow))\n else:\n print(\"'grow' parameter is too large for requested buffer size. 
\"\n \"Reducing 'grow' to {}\".format(new_grow))\n grow = new_grow\n overlap = 2 * grow\n\n nbr = section_nrows - overlap\n nsec = (imrows - overlap) // nbr\n if (imrows - overlap) % nbr > 0:\n nsec += 1\n\n for k in range(nsec):\n e1 = k * nbr\n e2 = e1 + section_nrows\n u1 = grow\n u2 = u1 + nbr\n\n if k == 0: # first section\n u1 = 0\n\n if k == nsec - 1: # last section\n e2 = min(e2, imrows)\n e1 = min(e1, e2 - overlap - 1)\n u2 = e2 - e1\n\n imdrizSectionsList = np.empty(\n (len(singleDrizList), e2 - e1, imcols),\n dtype=single_data_dtype\n )\n for i, w in enumerate(singleDrizList):\n imdrizSectionsList[i, :, :] = w[e1:e2]\n\n if singleWeightList:\n weightSectionsList = np.empty(\n (len(singleWeightList), e2 - e1, imcols),\n dtype=single_data_dtype\n )\n for i, w in enumerate(singleWeightList):\n weightSectionsList[i, :, :] = w[e1:e2]\n else:\n weightSectionsList = None\n\n weight_mask_list = None\n\n if newmasks and weightSectionsList is not None:\n # Build new masks from single drizzled images.\n # Generate new pixel mask file for median step.\n # This mask will be created from the single-drizzled\n # weight image for this image.\n\n # The mean of the weight array will be computed and all\n # pixels with values less than 0.7 of the mean will be flagged\n # as bad in this mask. 
This mask will then be used when\n # creating the median image.\n # 0 means good, 1 means bad here...\n weight_mask_list = np.less(\n weightSectionsList,\n np.asarray(wht_mean)[:, None, None]\n ).astype(np.uint8)\n\n if 'minmed' in comb_type: # Do MINMED\n # set up use of 'imedian'/'imean' in minmed algorithm\n fillval = comb_type.startswith('i')\n\n # Create the combined array object using the minmed algorithm\n result = min_med(\n imdrizSectionsList,\n weightSectionsList,\n readnoiseList,\n exposureTimeList,\n backgroundValueList,\n weight_masks=weight_mask_list,\n combine_grow=grow,\n combine_nsigma1=nsigma1,\n combine_nsigma2=nsigma2,\n fillval=fillval\n )\n\n else: # DO NUMCOMBINE\n # Create the combined array object using the numcombine task\n result = numcombine.num_combine(\n imdrizSectionsList,\n masks=weight_mask_list,\n combination_type=comb_type,\n nlow=nlow,\n nhigh=nhigh,\n upper=hthresh,\n lower=lthresh\n )\n\n # Write out the processed image sections to the final output array:\n medianImageArray[e1+u1:e1+u2, :] = result[u1:u2, :]\n\n # Write out the combined image\n # use the header from the first single drizzled image in the list\n pf = _writeImage(medianImageArray, inputHeader=single_hdr)\n\n if virtual:\n mediandict = {}\n mediandict[medianfile] = pf\n for img in imageObjectList:\n img.saveVirtualOutputs(mediandict)\n else:\n try:\n print(\"Saving output median image to: '{}'\".format(medianfile))\n pf.writeto(medianfile)\n except IOError:\n msg = \"Problem writing file '{}'\".format(medianfile)\n print(msg)\n raise IOError(msg)\n\n # Always close any files opened to produce median image; namely,\n # single drizzle images and singly-drizzled weight images\n #\n for img in singleDrizList:\n if not virtual:\n img.close()\n\n # Close all singly drizzled weight images used to create median image.\n for img in singleWeightList:\n if not virtual:\n img.close()"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 
3 function does\ndef _writeImage(dataArray=None, inputHeader=None):\n prihdu = fits.PrimaryHDU(data=dataArray, header=inputHeader)\n pf = fits.HDUList()\n pf.append(prihdu)\n return pf", "response": "Writes out the result of the combination step."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nperform coordinate transformations between two images using STWCS and full distortion models.", "response": "def tran(inimage,outimage,direction='forward',x=None,y=None,\n coords=None, coordfile=None,colnames=None,separator=None,\n precision=6, output=None,verbose=True):\n \"\"\" Primary interface to perform coordinate transformations in pixel\n coordinates between 2 images using STWCS and full distortion models\n read from each image's header.\n \"\"\"\n single_coord = False\n\n # Only use value provided in `coords` if nothing has been specified for coordfile\n if coords is not None and coordfile is None:\n coordfile = coords\n warnings.simplefilter('always',DeprecationWarning)\n warnings.warn(\"Please update calling code to pass in `coordfile` instead of `coords`.\",\n category=DeprecationWarning)\n warnings.simplefilter('default',DeprecationWarning)\n\n if coordfile is not None:\n if colnames in util.blank_list:\n colnames = ['c1','c2']\n # Determine columns which contain pixel positions\n cols = util.parse_colnames(colnames,coordfile)\n # read in columns from input coordinates file\n xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)\n if xyvals.ndim == 1: # only 1 entry in coordfile\n xlist = [xyvals[0].copy()]\n ylist = [xyvals[1].copy()]\n else:\n xlist = xyvals[:,0].copy()\n ylist = xyvals[:,1].copy()\n del xyvals\n else:\n if isinstance(x,np.ndarray):\n xlist = x.tolist()\n ylist = y.tolist()\n elif not isinstance(x,list):\n xlist = [x]\n ylist = [y]\n single_coord = True\n else:\n xlist = x\n ylist = y\n\n # start by reading in WCS+distortion info for each image\n im1wcs = wcsutil.HSTWCS(inimage)\n if 
im1wcs.wcs.is_unity():\n print(\"####\\nNo valid input WCS found in {}.\\n Results may be invalid.\\n####\\n\".format(inimage))\n\n if util.is_blank(outimage):\n fname,fextn = fileutil.parseFilename(inimage)\n numsci = fileutil.countExtn(fname)\n chips = []\n for e in range(1,numsci+1):\n chips.append(wcsutil.HSTWCS(fname,ext=('sci',e)))\n if len(chips) == 0:\n chips = [im1wcs]\n im2wcs = distortion.utils.output_wcs(chips)\n else:\n im2wcs = wcsutil.HSTWCS(outimage)\n\n if im2wcs.wcs.is_unity():\n print(\"####\\nNo valid output WCS found in {}.\\n Results may be invalid.\\n####\\n\".format(outimage))\n\n # Setup the transformation\n p2p = wcs_functions.WCSMap(im1wcs,im2wcs)\n\n if direction[0].lower() == 'f':\n outx,outy = p2p.forward(xlist,ylist)\n else:\n outx,outy = p2p.backward(xlist,ylist)\n\n if isinstance(outx,np.ndarray):\n outx = outx.tolist()\n outy = outy.tolist()\n\n # add formatting based on precision here...\n xstr = []\n ystr = []\n fmt = \"%.\"+repr(precision)+\"f\"\n for ox,oy in zip(outx,outy):\n xstr.append(fmt%ox)\n ystr.append(fmt%oy)\n\n if verbose or (not verbose and util.is_blank(output)):\n print('# Coordinate transformations for ',inimage)\n print('# X(in) Y(in) X(out) Y(out)\\n')\n for xs,ys,a,b in zip(xlist,ylist,xstr,ystr):\n print(\"%.4f %.4f %s %s\"%(xs,ys,a,b))\n\n # Create output file, if specified\n if output:\n f = open(output,mode='w')\n f.write(\"# Coordinates converted from %s\\n\"%inimage)\n for xs,ys in zip(xstr,ystr):\n f.write('%s %s\\n'%(xs,ys))\n f.close()\n print('Wrote out results to: ',output)\n\n if single_coord:\n outx = outx[0]\n outy = outy[0]\n return outx,outy"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nfetches the content - addressed URI stored at the given registry URI.", "response": "def fetch_uri_contents(self, uri: str) -> URI:\n \"\"\"\n Return content-addressed URI stored at registry URI.\n \"\"\"\n address, pkg_name, pkg_version = parse_registry_uri(uri)\n 
self.w3.enable_unstable_package_management_api()\n self.w3.pm.set_registry(address)\n _, _, manifest_uri = self.w3.pm.get_release_data(pkg_name, pkg_version)\n return manifest_uri"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a list of IPFS hash file name and size of a file or directory.", "response": "def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:\n \"\"\"\n Return a dict containing the IPFS hash, file name, and size of a file.\n \"\"\"\n if file_or_dir_path.is_dir():\n asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob(\"*\")]\n elif file_or_dir_path.is_file():\n asset_data = [dummy_ipfs_pin(file_or_dir_path)]\n else:\n raise FileNotFoundError(\n f\"{file_or_dir_path} is not a valid file or directory path.\"\n )\n return asset_data"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncancels a task in the given application.", "response": "async def cancel_task(app: web.Application,\n task: asyncio.Task,\n *args, **kwargs\n ) -> Any:\n \"\"\"\n Convenience function for calling `TaskScheduler.cancel(task)`\n\n This will use the default `TaskScheduler` to cancel the given task.\n\n Example:\n\n import asyncio\n from datetime import datetime\n from brewblox_service import scheduler, service\n\n async def current_time(interval):\n while True:\n await asyncio.sleep(interval)\n print(datetime.now())\n\n\n async def stop_after(app, task, duration):\n await asyncio.sleep(duration)\n await scheduler.cancel_task(app, task)\n print('stopped!')\n\n\n async def start(app):\n # Start first task\n task = await scheduler.create_task(app, current_time(interval=2))\n\n # Start second task to stop the first\n await scheduler.create_task(app, stop_after(app, task, duration=10))\n\n\n app = service.create_app(default_name='example')\n\n scheduler.setup(app)\n app.on_startup.append(start)\n\n service.furnish(app)\n service.run(app)\n \"\"\"\n return await 
get_scheduler(app).cancel(task, *args, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncleans up the task collection.", "response": "async def _cleanup(self):\n \"\"\"\n Periodically removes completed tasks from the collection,\n allowing fire-and-forget tasks to be garbage collected.\n\n This does not delete the task object, it merely removes the reference in the scheduler.\n \"\"\"\n while True:\n await asyncio.sleep(CLEANUP_INTERVAL_S)\n self._tasks = {t for t in self._tasks if not t.done()}"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\nasync def create(self, coro: Coroutine) -> asyncio.Task:\n task = asyncio.get_event_loop().create_task(coro)\n self._tasks.add(task)\n return task", "response": "Starts execution of a coroutine and returns a Task object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\nasync def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:\n if task is None:\n return\n\n task.cancel()\n\n with suppress(KeyError):\n self._tasks.remove(task)\n\n with suppress(Exception):\n return (await task) if wait_for else None", "response": "Cancels and waits for an asynchronous task to finish."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_content_addressed_github_uri(uri: URI) -> URI:\n if not is_valid_api_github_uri(uri):\n raise CannotHandleURI(f\"{uri} does not conform to Github's API 'url' scheme.\")\n response = requests.get(uri)\n response.raise_for_status()\n contents = json.loads(response.content)\n if contents[\"type\"] != \"file\":\n raise CannotHandleURI(\n f\"Expected url to point to a 'file' type, instead received {contents['type']}.\"\n )\n return contents[\"git_url\"]", "response": "Returns a content - addressed Github URI."} {"SOURCE": "codesearchnet", "instruction": "Make 
a summary of the following Python 3 code\ndef is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:\n if not is_text(uri):\n return False\n\n parsed = parse.urlparse(uri)\n path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc\n if not all((path, scheme, authority)):\n return False\n\n if any(term for term in expected_path_terms if term not in path):\n return False\n\n if scheme != \"https\":\n return False\n\n if authority != GITHUB_API_AUTHORITY:\n return False\n return True", "response": "Check if the given URI fulfills the Github specification."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:\n blob_path = parse.urlparse(blob_uri).path\n blob_hash = blob_path.split(\"/\")[-1]\n contents_str = to_text(contents)\n content_length = len(contents_str)\n hashable_contents = \"blob \" + str(content_length) + \"\\0\" + contents_str\n hash_object = hashlib.sha1(to_bytes(text=hashable_contents))\n if hash_object.hexdigest() != blob_hash:\n raise ValidationError(\n f\"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}.\"\n )", "response": "Validates that the sha1 hash of the contents of the blob_uri is the same as the hash found in blob_uri."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef parse_registry_uri(uri: str) -> RegistryURI:\n validate_registry_uri(uri)\n parsed_uri = parse.urlparse(uri)\n authority = parsed_uri.netloc\n pkg_name = parsed_uri.path.strip(\"/\")\n pkg_version = parsed_uri.query.lstrip(\"version=\").strip(\"/\")\n return RegistryURI(authority, pkg_name, pkg_version)", "response": "Validate and return a RegistryURI from a valid registry URI"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef is_supported_content_addressed_uri(uri: URI) -> bool:\n if not 
is_ipfs_uri(uri) and not is_valid_content_addressed_github_uri(uri):\n return False\n return True", "response": "Returns a bool indicating whether the provided uri is currently supported."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncreating a BIP122 URI from the given chain_id and resource type and identifier.", "response": "def create_BIP122_uri(\n chain_id: str, resource_type: str, resource_identifier: str\n) -> URI:\n \"\"\"\n See: https://github.com/bitcoin/bips/blob/master/bip-0122.mediawiki\n \"\"\"\n if resource_type != BLOCK:\n raise ValueError(\"Invalid resource_type. Must be one of 'block'\")\n elif not is_block_or_transaction_hash(resource_identifier):\n raise ValueError(\n \"Invalid resource_identifier. Must be a hex encoded 32 byte value\"\n )\n elif not is_block_or_transaction_hash(chain_id):\n raise ValueError(\"Invalid chain_id. Must be a hex encoded 32 byte value\")\n\n return URI(\n parse.urlunsplit(\n [\n \"blockchain\",\n remove_0x_prefix(chain_id),\n f\"{resource_type}/{remove_0x_prefix(resource_identifier)}\",\n \"\",\n \"\",\n ]\n )\n )"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update_w3(self, w3: Web3) -> \"Package\":\n validate_w3_instance(w3)\n return Package(self.manifest, w3, self.uri)", "response": "Update the manifest with the same manifest and uri."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate a new Package instance from a manifest located at the provided Path.", "response": "def from_file(cls, file_path: Path, w3: Web3) -> \"Package\":\n \"\"\"\n Returns a ``Package`` instantiated by a manifest located at the provided Path.\n ``file_path`` arg must be a ``pathlib.Path`` instance.\n A valid ``Web3`` instance is required to instantiate a ``Package``.\n \"\"\"\n if isinstance(file_path, Path):\n raw_manifest = file_path.read_text()\n validate_raw_manifest_format(raw_manifest)\n manifest = 
json.loads(raw_manifest)\n else:\n raise TypeError(\n \"The Package.from_file method expects a pathlib.Path instance.\"\n f\"Got {type(file_path)} instead.\"\n )\n return cls(manifest, w3, file_path.as_uri())"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate a new Package object from a URI.", "response": "def from_uri(cls, uri: URI, w3: Web3) -> \"Package\":\n \"\"\"\n Returns a Package object instantiated by a manifest located at a content-addressed URI.\n A valid ``Web3`` instance is also required.\n URI schemes supported:\n - IPFS `ipfs://Qm...`\n - HTTP `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha`\n - Registry `ercXXX://registry.eth/greeter?version=1.0.0`\n\n .. code:: python\n\n OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501\n \"\"\"\n contents = to_text(resolve_uri_contents(uri))\n validate_raw_manifest_format(contents)\n manifest = json.loads(contents)\n return cls(manifest, w3, uri)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_contract_factory(self, name: ContractName) -> Contract:\n validate_contract_name(name)\n\n if \"contract_types\" not in self.manifest:\n raise InsufficientAssetsError(\n \"This package does not contain any contract type data.\"\n )\n\n try:\n contract_data = self.manifest[\"contract_types\"][name]\n except KeyError:\n raise InsufficientAssetsError(\n \"This package does not contain any package data to generate \"\n f\"a contract factory for contract type: {name}. 
Available contract types include: \"\n f\"{ list(self.manifest['contract_types'].keys()) }.\"\n )\n\n validate_minimal_contract_factory_data(contract_data)\n contract_kwargs = generate_contract_factory_kwargs(contract_data)\n contract_factory = self.w3.eth.contract(**contract_kwargs)\n return contract_factory", "response": "Returns a contract factory for a given contract type."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a Web3. contract instance generated from the contract type data available in the package. manifest and the provided address.", "response": "def get_contract_instance(self, name: ContractName, address: Address) -> Contract:\n \"\"\"\n Will return a ``Web3.contract`` instance generated from the contract type data available\n in ``Package.manifest`` and the provided ``address``. The provided ``address`` must be\n valid on the connected chain available through ``Package.w3``.\n \"\"\"\n validate_address(address)\n validate_contract_name(name)\n try:\n self.manifest[\"contract_types\"][name][\"abi\"]\n except KeyError:\n raise InsufficientAssetsError(\n \"Package does not have the ABI required to generate a contract instance \"\n f\"for contract: {name} at address: {address}.\"\n )\n contract_kwargs = generate_contract_factory_kwargs(\n self.manifest[\"contract_types\"][name]\n )\n canonical_address = to_canonical_address(address)\n contract_instance = self.w3.eth.contract(\n address=canonical_address, **contract_kwargs\n )\n return contract_instance"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning a Dependencies instance containing the build dependencies available on this Package.", "response": "def build_dependencies(self) -> \"Dependencies\":\n \"\"\"\n Return `Dependencies` instance containing the build dependencies available on this Package.\n The ``Package`` class should provide access to the full dependency tree.\n\n .. 
code:: python\n\n >>> owned_package.build_dependencies['zeppelin']\n \n \"\"\"\n validate_build_dependencies_are_present(self.manifest)\n\n dependencies = self.manifest[\"build_dependencies\"]\n dependency_packages = {}\n for name, uri in dependencies.items():\n try:\n validate_build_dependency(name, uri)\n dependency_package = Package.from_uri(uri, self.w3)\n except PyEthPMError as e:\n raise FailureToFetchIPFSAssetsError(\n f\"Failed to retrieve build dependency: {name} from URI: {uri}.\\n\"\n f\"Got error: {e}.\"\n )\n else:\n dependency_packages[name] = dependency_package\n\n return Dependencies(dependency_packages)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef deployments(self) -> Union[\"Deployments\", Dict[None, None]]:\n if not check_for_deployments(self.manifest):\n return {}\n\n all_blockchain_uris = self.manifest[\"deployments\"].keys()\n matching_uri = validate_single_matching_uri(all_blockchain_uris, self.w3)\n\n deployments = self.manifest[\"deployments\"][matching_uri]\n all_contract_factories = {\n deployment_data[\"contract_type\"]: self.get_contract_factory(\n deployment_data[\"contract_type\"]\n )\n for deployment_data in deployments.values()\n }\n validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True)\n linked_deployments = get_linked_deployments(deployments)\n if linked_deployments:\n for deployment_data in linked_deployments.values():\n on_chain_bytecode = self.w3.eth.getCode(\n to_canonical_address(deployment_data[\"address\"])\n )\n unresolved_linked_refs = normalize_linked_references(\n deployment_data[\"runtime_bytecode\"][\"link_dependencies\"]\n )\n resolved_linked_refs = tuple(\n self._resolve_linked_references(link_ref, deployments)\n for link_ref in unresolved_linked_refs\n )\n for linked_ref in resolved_linked_refs:\n validate_linked_references(linked_ref, on_chain_bytecode)\n\n return Deployments(deployments, all_contract_factories, self.w3)", 
"response": "Returns a Deployments object containing all the deployment data and contract factories of a Package. w3 instance."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns the IPFS data as if file was pinned to an actual node.", "response": "def dummy_ipfs_pin(path: Path) -> Dict[str, str]:\n \"\"\"\n Return IPFS data as if file was pinned to an actual node.\n \"\"\"\n ipfs_return = {\n \"Hash\": generate_file_hash(path.read_bytes()),\n \"Name\": path.name,\n \"Size\": str(path.stat().st_size),\n }\n return ipfs_return"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef extract_ipfs_path_from_uri(value: str) -> str:\n parse_result = parse.urlparse(value)\n\n if parse_result.netloc:\n if parse_result.path:\n return \"\".join((parse_result.netloc, parse_result.path.rstrip(\"/\")))\n else:\n return parse_result.netloc\n else:\n return parse_result.path.strip(\"/\")", "response": "Extract the path from an IPFS URI."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef is_ipfs_uri(value: str) -> bool:\n parse_result = parse.urlparse(value)\n if parse_result.scheme != \"ipfs\":\n return False\n if not parse_result.netloc and not parse_result.path:\n return False\n\n return True", "response": "Return a bool indicating whether or not the value is a valid IPFS URI."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_instance(self, contract_name: str) -> None:\n self._validate_name_and_references(contract_name)\n # Use a deployment's \"contract_type\" to lookup contract factory\n # in case the deployment uses a contract alias\n contract_type = self.deployment_data[contract_name][\"contract_type\"]\n factory = self.contract_factories[contract_type]\n address = to_canonical_address(self.deployment_data[contract_name][\"address\"])\n contract_kwargs = {\n 
\"abi\": factory.abi,\n \"bytecode\": factory.bytecode,\n \"bytecode_runtime\": factory.bytecode_runtime,\n }\n return self.w3.eth.contract(address=address, **contract_kwargs)", "response": "Fetches a contract instance belonging to the deployment."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef validate_address(address: Any) -> None:\n if not is_address(address):\n raise ValidationError(f\"Expected an address, got: {address}\")\n if not is_canonical_address(address):\n raise ValidationError(\n \"Py-EthPM library only accepts canonicalized addresses. \"\n f\"{address} is not in the accepted format.\"\n )", "response": "Raises a ValidationError if an address is not canonicalized."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef validate_empty_bytes(offset: int, length: int, bytecode: bytes) -> None:\n slot_length = offset + length\n slot = bytecode[offset:slot_length]\n if slot != bytearray(length):\n raise ValidationError(\n f\"Bytecode segment: [{offset}:{slot_length}] is not comprised of empty bytes, \"\n f\"rather: {slot}.\"\n )", "response": "Validates that bytecode is comprised of empty bytes."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nvalidates that the given package name is a valid package name.", "response": "def validate_package_name(pkg_name: str) -> None:\n \"\"\"\n Raise an exception if the value is not a valid package name\n as defined in the EthPM-Spec.\n \"\"\"\n if not bool(re.match(PACKAGE_NAME_REGEX, pkg_name)):\n raise ValidationError(f\"{pkg_name} is not a valid package name.\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef validate_registry_uri(uri: str) -> None:\n parsed = parse.urlparse(uri)\n scheme, authority, pkg_name, query = (\n parsed.scheme,\n parsed.netloc,\n parsed.path,\n parsed.query,\n )\n 
validate_registry_uri_scheme(scheme)\n validate_registry_uri_authority(authority)\n if query:\n validate_registry_uri_version(query)\n validate_package_name(pkg_name[1:])", "response": "Raises an exception if the URI does not conform to the registry URI scheme."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nvalidates that the authority is a valid ENS domain and a valid checksummed contract address.", "response": "def validate_registry_uri_authority(auth: str) -> None:\n \"\"\"\n Raise an exception if the authority is not a valid ENS domain\n or a valid checksummed contract address.\n \"\"\"\n if is_ens_domain(auth) is False and not is_checksum_address(auth):\n raise ValidationError(f\"{auth} is not a valid registry URI authority.\")"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nvalidate that the version param is valid.", "response": "def validate_registry_uri_version(query: str) -> None:\n \"\"\"\n Raise an exception if the version param is malformed.\n \"\"\"\n query_dict = parse.parse_qs(query, keep_blank_values=True)\n if \"version\" not in query_dict:\n raise ValidationError(f\"{query} is not a correctly formatted version param.\")"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nvalidates that a single URI in the list of all_blockchain_uris matches the w3 instance.", "response": "def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:\n \"\"\"\n Return a single block URI after validating that it is the *only* URI in\n all_blockchain_uris that matches the w3 instance.\n \"\"\"\n matching_uris = [\n uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)\n ]\n\n if not matching_uris:\n raise ValidationError(\"Package has no matching URIs on chain.\")\n elif len(matching_uris) != 1:\n raise ValidationError(\n f\"Package has too many ({len(matching_uris)}) matching URIs: 
{matching_uris}.\"\n )\n return matching_uris[0]"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nvalidating that every key in meta is one of META_FIELDS and has a value of the expected type.", "response": "def validate_meta_object(meta: Dict[str, Any], allow_extra_meta_fields: bool) -> None:\n \"\"\"\n Validates that every key is one of `META_FIELDS` and has a value of the expected type.\n \"\"\"\n for key, value in meta.items():\n if key in META_FIELDS:\n if type(value) is not META_FIELDS[key]:\n raise ValidationError(\n f\"Values for {key} are expected to have the type {META_FIELDS[key]}, \"\n f\"instead got {type(value)}.\"\n )\n elif allow_extra_meta_fields:\n if key[:2] != \"x-\":\n raise ValidationError(\n \"Undefined meta fields need to begin with 'x-', \"\n f\"{key} is not a valid undefined meta field.\"\n )\n else:\n raise ValidationError(\n f\"{key} is not a permitted meta field. To allow undefined fields, \"\n \"set `allow_extra_meta_fields` to True.\"\n )"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef validate_manifest_against_schema(manifest: Dict[str, Any]) -> None:\n schema_data = _load_schema_data()\n try:\n validate(manifest, schema_data)\n except jsonValidationError as e:\n raise ValidationError(\n f\"Manifest invalid for schema version {schema_data['version']}. 
\"\n f\"Reason: {e.message}\"\n )", "response": "Validate manifest against schema."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef validate_manifest_deployments(manifest: Dict[str, Any]) -> None:\n if set((\"contract_types\", \"deployments\")).issubset(manifest):\n all_contract_types = list(manifest[\"contract_types\"].keys())\n all_deployments = list(manifest[\"deployments\"].values())\n all_deployment_names = extract_contract_types_from_deployments(all_deployments)\n missing_contract_types = set(all_deployment_names).difference(\n all_contract_types\n )\n if missing_contract_types:\n raise ValidationError(\n f\"Manifest missing references to contracts: {missing_contract_types}.\"\n )", "response": "Validate that a manifest s deployments contracts reference existing contract_types."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef validate_raw_manifest_format(raw_manifest: str) -> None:\n try:\n manifest_dict = json.loads(raw_manifest, encoding=\"UTF-8\")\n except json.JSONDecodeError as err:\n raise json.JSONDecodeError(\n \"Failed to load package data. File is not a valid JSON document.\",\n err.doc,\n err.pos,\n )\n compact_manifest = json.dumps(manifest_dict, sort_keys=True, separators=(\",\", \":\"))\n if raw_manifest != compact_manifest:\n raise ValidationError(\n \"The manifest appears to be malformed. Please ensure that it conforms to the \"\n \"EthPM-Spec for document format. 
\"\n \"http://ethpm.github.io/ethpm-spec/package-spec.html#document-format \"\n )", "response": "Validate that a manifest is in the correct format."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef build(obj: Dict[str, Any], *fns: Callable[..., Any]) -> Dict[str, Any]:\n return pipe(obj, *fns)", "response": "Wrapper function to pipe manifest through build functions."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]:\n return {\n contract_name: make_path_relative(path)\n for path in compiler_output\n for contract_name in compiler_output[path].keys()\n }", "response": "Return a mapping of contract name to relative path as defined in compiler output."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmake the given path relative to the virtual source tree root directory.", "response": "def make_path_relative(path: str) -> str:\n \"\"\"\n Returns the given path prefixed with \"./\" if the path\n is not already relative in the compiler output.\n \"\"\"\n if \"../\" in path:\n raise ManifestBuildingError(\n f\"Path: {path} appears to be outside of the virtual source tree. 
\"\n \"Please make sure all sources are within the virtual source tree root directory.\"\n )\n\n if path[:2] != \"./\":\n return f\"./{path}\"\n return path"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a copy of the manifest with contract_data field added to the manifest.", "response": "def contract_type(\n name: str,\n compiler_output: Dict[str, Any],\n alias: Optional[str] = None,\n abi: Optional[bool] = False,\n compiler: Optional[bool] = False,\n contract_type: Optional[bool] = False,\n deployment_bytecode: Optional[bool] = False,\n natspec: Optional[bool] = False,\n runtime_bytecode: Optional[bool] = False,\n) -> Manifest:\n \"\"\"\n Returns a copy of manifest with added contract_data field as specified by kwargs.\n If no kwargs are present, all available contract_data found in the compiler output\n will be included.\n\n To include specific contract_data fields, add kwarg set to True (i.e. `abi=True`)\n To alias a contract_type, include a kwarg `alias` (i.e. 
`alias=\"OwnedAlias\"`)\n If only an alias kwarg is provided, all available contract data will be included.\n Kwargs must match fields as defined in the EthPM Spec (except \"alias\") if user\n wants to include them in custom contract_type.\n \"\"\"\n contract_type_fields = {\n \"contract_type\": contract_type,\n \"deployment_bytecode\": deployment_bytecode,\n \"runtime_bytecode\": runtime_bytecode,\n \"abi\": abi,\n \"natspec\": natspec,\n \"compiler\": compiler,\n }\n selected_fields = [k for k, v in contract_type_fields.items() if v]\n return _contract_type(name, compiler_output, alias, selected_fields)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns an iterable of tuples that are available in the contract type data.", "response": "def filter_all_data_by_selected_fields(\n all_type_data: Dict[str, Any], selected_fields: List[str]\n) -> Iterable[Tuple[str, Any]]:\n \"\"\"\n Raises exception if selected field data is not available in the contract type data\n automatically gathered by normalize_compiler_output. Otherwise, returns the data.\n \"\"\"\n for field in selected_fields:\n if field in all_type_data:\n yield field, all_type_data[field]\n else:\n raise ManifestBuildingError(\n f\"Selected field: {field} not available in data collected from solc output: \"\n f\"{list(sorted(all_type_data.keys()))}. 
Please make sure the relevant data \"\n \"is present in your solc output.\"\n )"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef normalize_compiler_output(compiler_output: Dict[str, Any]) -> Dict[str, Any]:\n paths_and_names = [\n (path, contract_name)\n for path in compiler_output\n for contract_name in compiler_output[path].keys()\n ]\n paths, names = zip(*paths_and_names)\n if len(names) != len(set(names)):\n raise ManifestBuildingError(\n \"Duplicate contract names were found in the compiler output.\"\n )\n return {\n name: normalize_contract_type(compiler_output[path][name])\n for path, name in paths_and_names\n }", "response": "Normalizes the compiler output."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef normalize_contract_type(\n contract_type_data: Dict[str, Any]\n) -> Iterable[Tuple[str, Any]]:\n \"\"\"\n Serialize contract_data found in compiler output to the defined fields.\n \"\"\"\n yield \"abi\", contract_type_data[\"abi\"]\n if \"evm\" in contract_type_data:\n if \"bytecode\" in contract_type_data[\"evm\"]:\n yield \"deployment_bytecode\", normalize_bytecode_object(\n contract_type_data[\"evm\"][\"bytecode\"]\n )\n if \"deployedBytecode\" in contract_type_data[\"evm\"]:\n yield \"runtime_bytecode\", normalize_bytecode_object(\n contract_type_data[\"evm\"][\"deployedBytecode\"]\n )\n if any(key in contract_type_data for key in NATSPEC_FIELDS):\n natspec = deep_merge_dicts(\n contract_type_data.get(\"userdoc\", {}), contract_type_data.get(\"devdoc\", {})\n )\n yield \"natspec\", natspec\n # make sure metadata isn't an empty string in solc output\n if \"metadata\" in contract_type_data and contract_type_data[\"metadata\"]:\n yield \"compiler\", normalize_compiler_object(\n json.loads(contract_type_data[\"metadata\"])\n )", "response": "Converts the contract type data to the defined fields."} {"SOURCE": "codesearchnet", "instruction": 
"Write a Python 3 script to\nprocess the bytecode by converting link_refs in bytecode with 0 s.", "response": "def process_bytecode(link_refs: Dict[str, Any], bytecode: bytes) -> str:\n \"\"\"\n Replace link_refs in bytecode with 0's.\n \"\"\"\n all_offsets = [y for x in link_refs.values() for y in x.values()]\n # Link ref validation.\n validate_link_ref_fns = (\n validate_link_ref(ref[\"start\"] * 2, ref[\"length\"] * 2)\n for ref in concat(all_offsets)\n )\n pipe(bytecode, *validate_link_ref_fns)\n # Convert link_refs in bytecode to 0's\n link_fns = (\n replace_link_ref_in_bytecode(ref[\"start\"] * 2, ref[\"length\"] * 2)\n for ref in concat(all_offsets)\n )\n processed_bytecode = pipe(bytecode, *link_fns)\n return add_0x_prefix(processed_bytecode)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn a callable that allows the user to add deployments of the same type across multiple chains.", "response": "def deployment_type(\n *,\n contract_instance: str,\n contract_type: str,\n deployment_bytecode: Dict[str, Any] = None,\n runtime_bytecode: Dict[str, Any] = None,\n compiler: Dict[str, Any] = None,\n) -> Manifest:\n \"\"\"\n Returns a callable that allows the user to add deployments of the same type\n across multiple chains.\n \"\"\"\n return _deployment_type(\n contract_instance,\n contract_type,\n deployment_bytecode,\n runtime_bytecode,\n compiler,\n )"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef deployment(\n *,\n block_uri: URI,\n contract_instance: str,\n contract_type: str,\n address: HexStr,\n transaction: HexStr = None,\n block: HexStr = None,\n deployment_bytecode: Dict[str, Any] = None,\n runtime_bytecode: Dict[str, Any] = None,\n compiler: Dict[str, Any] = None,\n) -> Manifest:\n \"\"\"\n Returns a manifest, with the newly included deployment. 
Requires a valid blockchain URI,\n however no validation is provided that this URI is unique amongst the other deployment\n URIs, so the user must take care that each blockchain URI represents a unique blockchain.\n \"\"\"\n return _deployment(\n contract_instance,\n contract_type,\n deployment_bytecode,\n runtime_bytecode,\n compiler,\n block_uri,\n address,\n transaction,\n block,\n )", "response": "Returns a manifest with the newly included deployment."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nbuild a list of deployment data for a specific contract type.", "response": "def _build_deployments_object(\n contract_type: str,\n deployment_bytecode: Dict[str, Any],\n runtime_bytecode: Dict[str, Any],\n compiler: Dict[str, Any],\n address: HexStr,\n tx: HexStr,\n block: HexStr,\n manifest: Dict[str, Any],\n) -> Iterable[Tuple[str, Any]]:\n \"\"\"\n Returns a dict with properly formatted deployment data.\n \"\"\"\n yield \"contract_type\", contract_type\n yield \"address\", to_hex(address)\n if deployment_bytecode:\n yield \"deployment_bytecode\", deployment_bytecode\n if compiler:\n yield \"compiler\", compiler\n if tx:\n yield \"transaction\", tx\n if block:\n yield \"block\", block\n if runtime_bytecode:\n yield \"runtime_bytecode\", runtime_bytecode"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn an initial dict with the minimal requried fields for a valid manifest.", "response": "def init_manifest(\n package_name: str, version: str, manifest_version: Optional[str] = \"2\"\n) -> Dict[str, Any]:\n \"\"\"\n Returns an initial dict with the minimal requried fields for a valid manifest.\n Should only be used as the first fn to be piped into a `build()` pipeline.\n \"\"\"\n return {\n \"package_name\": package_name,\n \"version\": version,\n \"manifest_version\": manifest_version,\n }"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef 
write_to_disk(\n manifest_root_dir: Optional[Path] = None,\n manifest_name: Optional[str] = None,\n prettify: Optional[bool] = False,\n) -> Manifest:\n \"\"\"\n Write the active manifest to disk\n Defaults\n - Writes manifest to cwd unless Path is provided as manifest_root_dir.\n - Writes manifest with a filename of Manifest[version].json unless a desired\n manifest name (which must end in json) is provided as manifest_name.\n - Writes the minified manifest version to disk unless prettify is set to True.\n \"\"\"\n return _write_to_disk(manifest_root_dir, manifest_name, prettify)", "response": "Writes the manifest to disk unless prettify is set to True."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef pin_to_ipfs(\n manifest: Manifest, *, backend: BaseIPFSBackend, prettify: Optional[bool] = False\n) -> List[Dict[str, str]]:\n \"\"\"\n Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend.\n\n `pin_to_ipfs()` Should *always* be the last argument in a builder, as it will return the pin\n data and not the manifest.\n \"\"\"\n contents = format_manifest(manifest, prettify=prettify)\n\n with tempfile.NamedTemporaryFile() as temp:\n temp.write(to_bytes(text=contents))\n temp.seek(0)\n return backend.pin_assets(Path(temp.name))", "response": "Returns the IPFS pin data after pinning the manifest to the provided IPFS backend."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_parser(default_name: str) -> argparse.ArgumentParser:\n argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n argparser.add_argument('-H', '--host',\n help='Host to which the app binds. [%(default)s]',\n default='0.0.0.0')\n argparser.add_argument('-p', '--port',\n help='Port to which the app binds. [%(default)s]',\n default=5000,\n type=int)\n argparser.add_argument('-o', '--output',\n help='Logging output. 
[%(default)s]')\n argparser.add_argument('-n', '--name',\n help='Service name. This will be used as prefix for all endpoints. [%(default)s]',\n default=default_name)\n argparser.add_argument('--debug',\n help='Run the app in debug mode. [%(default)s]',\n action='store_true')\n argparser.add_argument('--eventbus-host',\n help='Hostname at which the eventbus can be reached [%(default)s]',\n default='eventbus')\n argparser.add_argument('--eventbus-port',\n help='Port at which the eventbus can be reached [%(default)s]',\n default=5672,\n type=int)\n return argparser", "response": "Creates a new argument parser for the brewblox_service command line."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_app(\n default_name: str = None,\n parser: argparse.ArgumentParser = None,\n raw_args: List[str] = None\n) -> web.Application:\n \"\"\"\n Creates and configures an Aiohttp application.\n\n Args:\n default_name (str, optional):\n Default value for the --name commandline argument.\n This value is required if `parser` is not provided.\n This value will be ignored if `parser` is provided.\n\n parser (argparse.ArgumentParser, optional):\n Application-specific parser.\n If not provided, the return value of `create_parser()` will be used.\n\n raw_args (list of str, optional):\n Explicit commandline arguments.\n Defaults to sys.argv[1:]\n\n Returns:\n web.Application: A configured Aiohttp Application object.\n This Application must be furnished, and is not yet running.\n\n \"\"\"\n\n if parser is None:\n assert default_name, 'Default service name is required'\n parser = create_parser(default_name)\n\n args = parser.parse_args(raw_args)\n _init_logging(args)\n\n LOGGER.info(f'Creating [{args.name}] application')\n app = web.Application()\n app['config'] = vars(args)\n return app", "response": "Creates and configures an Aiohttp application."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script 
to\nconfigure the application for furnishing.", "response": "def furnish(app: web.Application):\n \"\"\"\n Configures Application routes, readying it for running.\n\n This function modifies routes and resources that were added by calling code,\n and must be called immediately prior to `run(app)`.\n\n Args:\n app (web.Application):\n The Aiohttp Application as created by `create_app()`\n \"\"\"\n app_name = app['config']['name']\n prefix = '/' + app_name.lstrip('/')\n app.router.add_routes(routes)\n cors_middleware.enable_cors(app)\n\n # Configure CORS and prefixes on all endpoints.\n known_resources = set()\n for route in list(app.router.routes()):\n if route.resource in known_resources:\n continue\n known_resources.add(route.resource)\n route.resource.add_prefix(prefix)\n\n # Configure swagger settings\n # We set prefix explicitly here\n aiohttp_swagger.setup_swagger(app,\n swagger_url=prefix + '/api/doc',\n description='',\n title=f'Brewblox Service \"{app_name}\"',\n api_version='0.0',\n contact='development@brewpi.com')\n\n LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))\n\n for route in app.router.routes():\n LOGGER.info(f'Endpoint [{route.method}] {route.resource}')\n\n for name, impl in app.get(features.FEATURES_KEY, {}).items():\n LOGGER.info(f'Feature [{name}] {impl}')"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef run(app: web.Application):\n host = app['config']['host']\n port = app['config']['port']\n\n # starts app. 
run_app() will automatically start the async context.\n web.run_app(app, host=host, port=port)", "response": "Runs the application in an async context."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_linked_deployments(deployments: Dict[str, Any]) -> Dict[str, Any]:\n linked_deployments = {\n dep: data\n for dep, data in deployments.items()\n if get_in((\"runtime_bytecode\", \"link_dependencies\"), data)\n }\n for deployment, data in linked_deployments.items():\n if any(\n link_dep[\"value\"] == deployment\n for link_dep in data[\"runtime_bytecode\"][\"link_dependencies\"]\n ):\n raise BytecodeLinkingError(\n f\"Link dependency found in {deployment} deployment that references its \"\n \"own contract instance, which is disallowed\"\n )\n return linked_deployments", "response": "Returns all deployments that contain link dependencies."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nvalidates that normalized linked_references match the corresponding bytecode.", "response": "def validate_linked_references(\n link_deps: Tuple[Tuple[int, bytes], ...], bytecode: bytes\n) -> None:\n \"\"\"\n Validates that normalized linked_references (offset, expected_bytes)\n match the corresponding bytecode.\n \"\"\"\n offsets, values = zip(*link_deps)\n for idx, offset in enumerate(offsets):\n value = values[idx]\n # https://github.com/python/mypy/issues/4975\n offset_value = int(offset)\n dep_length = len(value)\n end_of_bytes = offset_value + dep_length\n # Ignore b/c whitespace around ':' conflict b/w black & flake8\n actual_bytes = bytecode[offset_value:end_of_bytes] # noqa: E203\n if actual_bytes != values[idx]:\n raise ValidationError(\n \"Error validating linked reference. 
\"\n f\"Offset: {offset} \"\n f\"Value: {values[idx]} \"\n f\"Bytecode: {bytecode} .\"\n )"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nnormalizes the linked references in a resource tree.", "response": "def normalize_linked_references(\n data: List[Dict[str, Any]]\n) -> Generator[Tuple[int, str, str], None, None]:\n \"\"\"\n Return a tuple of information representing all insertions of a linked reference.\n (offset, type, value)\n \"\"\"\n for deployment in data:\n for offset in deployment[\"offsets\"]:\n yield offset, deployment[\"type\"], deployment[\"value\"]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef validate_deployments_tx_receipt(\n deployments: Dict[str, Any], w3: Web3, allow_missing_data: bool = False\n) -> None:\n \"\"\"\n Validate that address and block hash found in deployment data match what is found on-chain.\n :allow_missing_data: by default, enforces validation of address and blockHash.\n \"\"\"\n # todo: provide hook to lazily look up tx receipt via binary search if missing data\n for name, data in deployments.items():\n if \"transaction\" in data:\n tx_hash = data[\"transaction\"]\n tx_receipt = w3.eth.getTransactionReceipt(tx_hash)\n # tx_address will be None if contract created via contract factory\n tx_address = tx_receipt[\"contractAddress\"]\n\n if tx_address is None and allow_missing_data is False:\n raise ValidationError(\n \"No contract address found in tx receipt. Unable to verify \"\n \"address found in tx receipt matches address in manifest's deployment data. \"\n \"If this validation is not necessary, please enable `allow_missing_data` arg. \"\n )\n\n if tx_address is not None and not is_same_address(\n tx_address, data[\"address\"]\n ):\n raise ValidationError(\n f\"Error validating tx_receipt for {name} deployment. 
\"\n f\"Address found in manifest's deployment data: {data['address']} \"\n f\"Does not match address found on tx_receipt: {tx_address}.\"\n )\n\n if \"block\" in data:\n if tx_receipt[\"blockHash\"] != to_bytes(hexstr=data[\"block\"]):\n raise ValidationError(\n f\"Error validating tx_receipt for {name} deployment. \"\n f\"Block found in manifest's deployment data: {data['block']} does not \"\n f\"Does not match block found on tx_receipt: {tx_receipt['blockHash']}.\"\n )\n elif allow_missing_data is False:\n raise ValidationError(\n \"No block hash found in deployment data. \"\n \"Unable to verify block hash on tx receipt. \"\n \"If this validation is not necessary, please enable `allow_missing_data` arg.\"\n )\n elif allow_missing_data is False:\n raise ValidationError(\n \"No transaction hash found in deployment data. \"\n \"Unable to validate tx_receipt. \"\n \"If this validation is not necessary, please enable `allow_missing_data` arg.\"\n )", "response": "Validate that the transaction receipt for the given deployment data matches the address and block hash found on - chain."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef is_prelinked_bytecode(bytecode: bytes, link_refs: List[Dict[str, Any]]) -> bool:\n for link_ref in link_refs:\n for offset in link_ref[\"offsets\"]:\n try:\n validate_empty_bytes(offset, link_ref[\"length\"], bytecode)\n except ValidationError:\n return True\n return False", "response": "Returns True if the bytecode is pre - linked."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\napply all link references corresponding to a valid attr_dict to the bytecode.", "response": "def apply_all_link_refs(\n bytecode: bytes, link_refs: List[Dict[str, Any]], attr_dict: Dict[str, str]\n) -> bytes:\n \"\"\"\n Applies all link references corresponding to a valid attr_dict to the bytecode.\n \"\"\"\n if link_refs is None:\n return bytecode\n link_fns = (\n apply_link_ref(offset, 
ref[\"length\"], attr_dict[ref[\"name\"]])\n for ref in link_refs\n for offset in ref[\"offsets\"]\n )\n linked_bytecode = pipe(bytecode, *link_fns)\n return linked_bytecode"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn the new bytecode with value put into the location indicated by offset and length.", "response": "def apply_link_ref(offset: int, length: int, value: bytes, bytecode: bytes) -> bytes:\n \"\"\"\n Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.\n \"\"\"\n try:\n validate_empty_bytes(offset, length, bytecode)\n except ValidationError:\n raise BytecodeLinkingError(\"Link references cannot be applied to bytecode\")\n\n new_bytes = (\n # Ignore linting error b/c conflict b/w black & flake8\n bytecode[:offset]\n + value\n + bytecode[offset + length :] # noqa: E201, E203\n )\n return new_bytes"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef link_bytecode(cls, attr_dict: Dict[str, str]) -> Type[\"LinkableContract\"]:\n if not cls.unlinked_references and not cls.linked_references:\n raise BytecodeLinkingError(\"Contract factory has no linkable bytecode.\")\n if not cls.needs_bytecode_linking:\n raise BytecodeLinkingError(\n \"Bytecode for this contract factory does not require bytecode linking.\"\n )\n cls.validate_attr_dict(attr_dict)\n bytecode = apply_all_link_refs(cls.bytecode, cls.unlinked_references, attr_dict)\n runtime = apply_all_link_refs(\n cls.bytecode_runtime, cls.linked_references, attr_dict\n )\n linked_class = cls.factory(\n cls.web3, bytecode_runtime=runtime, bytecode=bytecode\n )\n if linked_class.needs_bytecode_linking:\n raise BytecodeLinkingError(\n \"Expected class to be fully linked, but class still needs bytecode linking.\"\n )\n return linked_class", "response": "Returns a cloned contract factory with the deployment / runtime bytecode linked."} {"SOURCE": "codesearchnet", "instruction": 
"Here you have a function in Python 3, explain what it does\ndef validate_attr_dict(self, attr_dict: Dict[str, str]) -> None:\n attr_dict_names = list(attr_dict.keys())\n\n if not self.unlinked_references and not self.linked_references:\n raise BytecodeLinkingError(\n \"Unable to validate attr dict, this contract has no linked/unlinked references.\"\n )\n\n unlinked_refs = self.unlinked_references or ({},)\n linked_refs = self.linked_references or ({},)\n all_link_refs = unlinked_refs + linked_refs\n\n all_link_names = [ref[\"name\"] for ref in all_link_refs]\n if set(attr_dict_names) != set(all_link_names):\n raise BytecodeLinkingError(\n \"All link references must be defined when calling \"\n \"`link_bytecode` on a contract factory.\"\n )\n for address in attr_dict.values():\n if not is_canonical_address(address):\n raise BytecodeLinkingError(\n f\"Address: {address} as specified in the attr_dict is not \"\n \"a valid canoncial address.\"\n )", "response": "Validates that the given attr_dict contains all of the attributes that are not already defined in the manifest."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns an iterable containing package name and corresponding Package instance that are available.", "response": "def items(self) -> Tuple[Tuple[str, \"Package\"], ...]: # type: ignore\n \"\"\"\n Return an iterable containing package name and\n corresponding `Package` instance that are available.\n \"\"\"\n item_dict = {\n name: self.build_dependencies.get(name) for name in self.build_dependencies\n }\n return tuple(item_dict.items())"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns an iterable of the available Package instances.", "response": "def values(self) -> List[\"Package\"]: # type: ignore\n \"\"\"\n Return an iterable of the available `Package` instances.\n \"\"\"\n values = [self.build_dependencies.get(name) for name in self.build_dependencies]\n return 
values"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the dependency Package for a given package name.", "response": "def get_dependency_package(\n self, package_name: str\n ) -> \"Package\": # type: ignore # noqa: F821\n \"\"\"\n Return the dependency Package for a given package name.\n \"\"\"\n self._validate_name(package_name)\n return self.build_dependencies.get(package_name)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef add(app: web.Application,\n feature: Any,\n key: Hashable = None,\n exist_ok: bool = False\n ):\n \"\"\"\n Adds a new feature to the app.\n\n Features can either be registered as the default feature for the class,\n or be given an explicit name.\n\n Args:\n app (web.Application):\n The current Aiohttp application.\n\n feature (Any):\n The new feature that should be registered.\n It is recommended, but not required to use a `ServiceFeature`.\n\n key (Hashable, optional):\n The key under which the feature should be registered.\n Defaults to `type(feature)`.\n\n exist_ok (bool):\n If truthy, this function will do nothing if a feature was already registered for `key`.\n Otherwise, an exception is raised.\n\n \"\"\"\n if FEATURES_KEY not in app:\n app[FEATURES_KEY] = dict()\n\n key = key or type(feature)\n\n if key in app[FEATURES_KEY]:\n if exist_ok:\n return\n else:\n raise KeyError(f'Feature \"{key}\" already registered')\n\n app[FEATURES_KEY][key] = feature", "response": "Adds a new feature to the current Aiohttp application."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the current Aiohttp application s declared feature.", "response": "def get(app: web.Application,\n feature_type: Type[Any] = None,\n key: Hashable = None\n ) -> Any:\n \"\"\"\n Finds declared feature.\n Identification is done based on feature type and key.\n\n Args:\n app (web.Application):\n The current Aiohttp 
application.\n\n feature_type (Type[Any]):\n The Python type of the desired feature.\n If specified, it will be checked against the found feature.\n\n key (Hashable):\n A specific identifier for the desired feature.\n Defaults to `feature_type`\n\n Returns:\n Any: The feature found for the combination of `feature_type` and `key`\n \"\"\"\n key = key or feature_type\n\n if not key:\n raise AssertionError('No feature identifier provided')\n\n try:\n found = app[FEATURES_KEY][key]\n except KeyError:\n raise KeyError(f'No feature found for \"{key}\"')\n\n if feature_type and not isinstance(found, feature_type):\n raise AssertionError(f'Found {found} did not match type \"{feature_type}\"')\n\n return found"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef validate_minimal_contract_factory_data(contract_data: Dict[str, str]) -> None:\n if not all(key in contract_data.keys() for key in (\"abi\", \"deployment_bytecode\")):\n raise InsufficientAssetsError(\n \"Minimum required contract data to generate a deployable \"\n \"contract factory (abi & deployment_bytecode) not found.\"\n )", "response": "Validate that the contract data in a package contains at least an abi and deployment_bytecode."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef generate_contract_factory_kwargs(\n contract_data: Dict[str, Any]\n) -> Generator[Tuple[str, Any], None, None]:\n \"\"\"\n Build a dictionary of kwargs to be passed into contract factory.\n \"\"\"\n if \"abi\" in contract_data:\n yield \"abi\", contract_data[\"abi\"]\n\n if \"deployment_bytecode\" in contract_data:\n yield \"bytecode\", contract_data[\"deployment_bytecode\"][\"bytecode\"]\n if \"link_references\" in contract_data[\"deployment_bytecode\"]:\n yield \"unlinked_references\", tuple(\n contract_data[\"deployment_bytecode\"][\"link_references\"]\n )\n\n if \"runtime_bytecode\" in contract_data:\n yield 
\"bytecode_runtime\", contract_data[\"runtime_bytecode\"][\"bytecode\"]\n if \"link_references\" in contract_data[\"runtime_bytecode\"]:\n yield \"linked_references\", tuple(\n contract_data[\"runtime_bytecode\"][\"link_references\"]\n )", "response": "Generates a dictionary of kwargs to be passed into contract factory."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\npublish a new event to the event bus.", "response": "async def post_publish(request):\n \"\"\"\n ---\n tags:\n - Events\n summary: Publish event.\n description: Publish a new event message to the event bus.\n operationId: events.publish\n produces:\n - text/plain\n parameters:\n -\n in: body\n name: body\n description: Event message\n required: true\n schema:\n type: object\n properties:\n exchange:\n type: string\n routing:\n type: string\n message:\n type: object\n \"\"\"\n args = await request.json()\n try:\n await get_publisher(request.app).publish(\n args['exchange'],\n args['routing'],\n args['message']\n )\n return web.Response()\n\n except Exception as ex:\n warnings.warn(f'Unable to publish {args}: {ex}')\n return web.Response(body='Event bus connection refused', status=500)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nrelays incoming messages between the queue and the user callback", "response": "async def _relay(self,\n channel: aioamqp.channel.Channel,\n body: str,\n envelope: aioamqp.envelope.Envelope,\n properties: aioamqp.properties.Properties):\n \"\"\"Relays incoming messages between the queue and the user callback\"\"\"\n try:\n await channel.basic_client_ack(envelope.delivery_tag)\n await self.on_message(self, envelope.routing_key, json.loads(body))\n except Exception as ex:\n LOGGER.error(f'Exception relaying message in {self}: {ex}')"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nensure that the listener task only runs when actually needed. 
This function is a no-op if any of the preconditions is not met. Preconditions are: * The application is running (self._loop is set) * The task is not already running * There are subscriptions: either pending, or active", "response": "def _lazy_listen(self):\n \"\"\"\n Ensures that the listener task only runs when actually needed.\n This function is a no-op if any of the preconditions is not met.\n\n Preconditions are:\n * The application is running (self._loop is set)\n * The task is not already running\n * There are subscriptions: either pending, or active\n \"\"\"\n if all([\n self._loop,\n not self.running,\n self._subscriptions or (self._pending and not self._pending.empty()),\n ]):\n self._task = self._loop.create_task(self._listen())"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nadd a new event subscription to the listener.", "response": "def subscribe(self,\n exchange_name: str,\n routing: str,\n exchange_type: ExchangeType_ = 'topic',\n on_message: EVENT_CALLBACK_ = None\n ) -> EventSubscription:\n \"\"\"Adds a new event subscription to the listener.\n\n Actual queue declaration to the remote message server is done when connected.\n If the listener is not currently connected, it defers declaration.\n\n All existing subscriptions are redeclared on the remote if `EventListener`\n loses and recreates the connection.\n\n Args:\n exchange_name (str):\n Name of the AMQP exchange. 
Messages are always published to a specific exchange.\n\n routing (str):\n Filter messages passing through the exchange.\n A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.\n\n exchange_type (ExchangeType_, optional):\n If the exchange does not yet exist, it will be created with this type.\n Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.\n\n on_message (EVENT_CALLBACK_, optional):\n The function to be called when a new message is received.\n If `on_message` is none, it will default to logging the message.\n\n Returns:\n EventSubscription:\n The newly created subscription.\n This value can safely be discarded: EventListener keeps its own reference.\n \"\"\"\n sub = EventSubscription(\n exchange_name,\n routing,\n exchange_type,\n on_message=on_message\n )\n\n if self._pending is not None:\n self._pending.put_nowait(sub)\n else:\n self._pending_pre_async.append(sub)\n LOGGER.info(f'Deferred event bus subscription: [{sub}]')\n\n self._lazy_listen()\n return sub"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\npublish a new event message to an existing exchange.", "response": "async def publish(self,\n exchange: str,\n routing: str,\n message: Union[str, dict],\n exchange_type: ExchangeType_ = 'topic'):\n \"\"\"\n Publish a new event message.\n\n Connections are created automatically when calling `publish()`,\n and will attempt to reconnect if connection was lost.\n\n For more information on publishing AMQP messages,\n see https://www.rabbitmq.com/tutorials/tutorial-three-python.html\n\n Args:\n exchange (str):\n The AMQP message exchange to publish the message to.\n A new exchange will be created if it does not yet exist.\n\n routing (str):\n The routing identification with which the message should be published.\n Subscribers use routing information for fine-grained filtering.\n Routing can be expressed as a '.'-separated path.\n\n message (Union[str, dict]):\n The message body. 
It will be serialized before transmission.\n\n exchange_type (ExchangeType_, optional):\n When publishing to a previously undeclared exchange, it will be created.\n `exchange_type` defines how the exchange distributes messages between subscribers.\n The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'.\n\n Raises:\n aioamqp.exceptions.AioamqpException:\n * Failed to connect to AMQP host\n * Failed to send message\n * `exchange` already exists, but has a different `exchange_type`\n \"\"\"\n try:\n await self._ensure_channel()\n except Exception:\n # If server has restarted since our last attempt, ensure channel will fail (old connection invalid)\n # Retry once to check whether a new connection can be made\n await self._ensure_channel()\n\n # json.dumps() also correctly handles strings\n data = json.dumps(message).encode()\n\n await self._channel.exchange_declare(\n exchange_name=exchange,\n type_name=exchange_type,\n auto_delete=True\n )\n\n await self._channel.basic_publish(\n payload=data,\n exchange_name=exchange,\n routing_key=routing\n )"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef is_ens_domain(authority: str) -> bool:\n # check that authority ends with the tld '.eth'\n # check that there are either 2 or 3 subdomains in the authority\n # i.e. 
zeppelinos.eth or packages.zeppelinos.eth\n if authority[-4:] != \".eth\" or len(authority.split(\".\")) not in [2, 3]:\n return False\n return True", "response": "Check that the authority is a valid ENS domain."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef splitPrefix(name):\n if isinstance(name, basestring) \\\n and ':' in name:\n return tuple(name.split(':', 1))\n else:\n return (None, name)", "response": "Splits the name into a tuple containing the first element in\n and the second element in\n ."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef unwrap(self, d, item):\n nopt = ( lambda x: x )\n try:\n md = d.__metadata__\n pmd = getattr(md, '__print__', None)\n if pmd is None:\n return item\n wrappers = getattr(pmd, 'wrappers', {})\n fn = wrappers.get(item[0], nopt)\n return (item[0], fn(item[1]))\n except:\n pass\n return item", "response": "translate ( unwrap ) using an optional wrapper function"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef exclude(self, d, item):\n try:\n md = d.__metadata__\n pmd = getattr(md, '__print__', None)\n if pmd is None:\n return False\n excludes = getattr(pmd, 'excludes', [])\n return ( item[0] in excludes ) \n except:\n pass\n return False", "response": "check metadata for excluded items"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef mangle(self, name, x):\n h = abs(hash(name))\n return '%s-%s' % (h, x)", "response": "Mangle the name by hashing the name and appending the I { x }."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nfind the definition object by name.", "response": "def find(self, name, resolved=True):\n \"\"\"\n Get the definition object for the schema object by name.\n @param name: The name of a schema object.\n @type name: 
basestring\n @param resolved: A flag indicating that the fully resolved type\n should be returned.\n @type resolved: boolean\n @return: The found schema I{type}\n @rtype: L{xsd.sxbase.SchemaObject}\n \"\"\"\n #log.debug('searching schema for (%s)', name)\n qref = qualify(name, self.schema.root, self.schema.tns)\n query = BlindQuery(qref)\n result = query.execute(self.schema)\n if result is None:\n log.error('(%s) not-found', name)\n return None\n #log.debug('found (%s) as (%s)', name, Repr(result))\n if resolved:\n result = result.resolve()\n return result"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef push(self, x):\n if isinstance(x, Frame):\n frame = x\n else:\n frame = Frame(x)\n self.stack.append(frame)\n #log.debug('push: (%s)\\n%s', Repr(frame), Repr(self.stack))\n return frame", "response": "Push an I{object onto the stack."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef pop(self):\n if len(self.stack): \n popped = self.stack.pop()\n #log.debug('pop: (%s)\\n%s', Repr(popped), Repr(self.stack))\n return popped\n else:\n #log.debug('stack empty, not-popped')\n pass\n return None", "response": "Pop the frame at the top of the stack."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nget a child by name", "response": "def getchild(self, name, parent):\n \"\"\" get a child by name \"\"\"\n #log.debug('searching parent (%s) for (%s)', Repr(parent), name)\n if name.startswith('@'):\n return parent.get_attribute(name[1:])\n else:\n return parent.get_child(name)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef query(self, name, node):\n #log.debug('searching schema for (%s)', name)\n qref = qualify(name, node, node.namespace())\n query = BlindQuery(qref)\n result = query.execute(self.schema)\n return (result, [])", "response": "blindly query the schema by name 
and node"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nresolving soap body parts by cross - referencing with operation defined in portType.", "response": "def resolvesoapbody(self, definitions, op):\n \"\"\"\n Resolve soap body I{message} parts by \n cross-referencing with operation defined in port type.\n @param definitions: A definitions object.\n @type definitions: L{Definitions}\n @param op: An I{operation} object.\n @type op: I{operation}\n \"\"\"\n ptop = self.type.operation(op.name)\n if ptop is None:\n raise Exception, \\\n \"operation '%s' not defined in portType\" % op.name\n soap = op.soap\n parts = soap.input.body.parts\n if len(parts):\n pts = []\n for p in ptop.input.parts:\n if p.name in parts:\n pts.append(p)\n soap.input.body.parts = pts\n else:\n soap.input.body.parts = ptop.input.parts\n parts = soap.output.body.parts\n if len(parts):\n pts = []\n for p in ptop.output.parts:\n if p.name in parts:\n pts.append(p)\n soap.output.body.parts = pts\n else:\n soap.output.body.parts = ptop.output.parts"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef resolveheaders(self, definitions, op):\n soap = op.soap\n headers = soap.input.headers + soap.output.headers\n for header in headers:\n mn = header.message\n ref = qualify(mn, self.root, definitions.tns)\n message = definitions.messages.get(ref)\n if message is None:\n raise Exception, \"message'%s', not-found\" % mn\n pn = header.part\n for p in message.parts:\n if p.name == pn:\n header.part = p\n break\n if pn == header.part:\n raise Exception, \\\n \"message '%s' has not part named '%s'\" % (ref, pn)", "response": "Resolve soap header I { message references."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nresolves faults by cross - referencing with operation defined in portType.", "response": "def resolvefaults(self, definitions, op):\n \"\"\"\n Resolve soap fault 
I{message} references by\n cross-referencing with operation defined in port type.\n @param definitions: A definitions object.\n @type definitions: L{Definitions}\n @param op: An I{operation} object.\n @type op: I{operation}\n \"\"\"\n ptop = self.type.operation(op.name)\n if ptop is None:\n raise Exception, \\\n \"operation '%s' not defined in portType\" % op.name\n soap = op.soap\n for fault in soap.faults:\n for f in ptop.faults:\n if f.name == fault.name:\n fault.parts = f.message.parts\n continue\n if hasattr(fault, 'parts'):\n continue\n raise Exception, \\\n \"fault '%s' not defined in portType '%s'\" % (fault.name, self.type.name)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nfind the specified location in the store.", "response": "def find(self, location):\n \"\"\"\n Find the specified location in the store.\n @param location: The I{location} part of a URL.\n @type location: str\n @return: An input stream to the document.\n @rtype: StringIO\n \"\"\"\n try:\n content = self.store[location]\n return StringIO(content)\n except:\n reason = 'location \"%s\" not in document store' % location\n raise Exception, reason"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef publictypes(self):\n for t in self.wsdl.schema.types.values():\n if t in self.params: continue\n if t in self.types: continue\n item = (t, t)\n self.types.append(item)\n tc = lambda x,y: cmp(x[0].name, y[0].name)\n self.types.sort(cmp=tc)", "response": "get all public types"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef encode(self, s):\n if isinstance(s, basestring) and self.needsEncoding(s):\n for x in self.encodings:\n s = s.replace(x[0], x[1])\n return s", "response": "Encode special characters found in string I { s.\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef 
decode(self, s):\n if isinstance(s, basestring) and '&' in s:\n for x in self.decodings:\n s = s.replace(x[0], x[1])\n return s", "response": "Decode special characters encodings found in string I { s.\n "} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nparsing a python I{file - like object and a string - like object.", "response": "def parse(self, file=None, string=None):\n \"\"\"\n SAX parse XML text.\n @param file: Parse a python I{file-like} object.\n @type file: I{file-like} object.\n @param string: Parse string XML.\n @type string: str\n \"\"\"\n timer = metrics.Timer()\n timer.start()\n sax, handler = self.saxparser()\n if file is not None:\n sax.parse(file)\n timer.stop()\n metrics.log.debug('sax (%s) duration: %s', file, timer)\n return handler.nodes[0]\n if string is not None:\n source = InputSource(None)\n source.setByteStream(StringIO(string))\n sax.parse(source)\n timer.stop()\n metrics.log.debug('%s\\nsax duration: %s', string, timer)\n return handler.nodes[0]"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_message(self, method, args, kwargs, options=None):\n\n content = self.headercontent(method, options=options)\n header = self.header(content)\n content = self.bodycontent(method, args, kwargs)\n body = self.body(content)\n env = self.envelope(header, body)\n if self.options().prefixes:\n body.normalizePrefixes()\n env.promotePrefixes()\n else:\n env.refitPrefixes()\n return Document(env)", "response": "Get the soap message for the specified method args and kwargs."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef invoke(self, args, kwargs):\n timer = metrics.Timer()\n timer.start()\n result = None\n binding = self.method.binding.input\n soapenv = binding.get_message(self.method, args, kwargs,\n options=self.options)\n timer.stop()\n metrics.log.debug(\n \"message for '%s' created: 
%s\",\n self.method.name,\n timer)\n timer.start()\n result = self.send(soapenv)\n timer.stop()\n metrics.log.debug(\n \"method '%s' invoked: %s\",\n self.method.name,\n timer)\n return result", "response": "Send the required soap message to invoke the specified method\n "} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef send(self, soapenv):\n result = None\n location = self.location()\n binding = self.method.binding.input\n transport = self.options.transport\n retxml = self.options.retxml\n prettyxml = self.options.prettyxml\n log.debug('sending to (%s)\\nmessage:\\n%s', location, soapenv)\n try:\n self.last_sent(soapenv)\n plugins = PluginContainer(self.options.plugins)\n plugins.message.marshalled(envelope=soapenv.root())\n if prettyxml:\n soapenv = soapenv.str()\n else:\n soapenv = soapenv.plain()\n soapenv = soapenv.encode('utf-8')\n plugins.message.sending(envelope=soapenv)\n request = Request(location, soapenv)\n request.headers = self.headers()\n reply = transport.send(request)\n ctx = plugins.message.received(reply=reply.message)\n reply.message = ctx.reply\n if retxml:\n result = reply.message\n else:\n timer = metrics.Timer()\n timer.start()\n result = self.succeeded(binding, reply.message)\n #cProfile.runctx(\"result = self.succeeded(binding, reply.message)\", globals(), locals(), \"unmarshal_prof\")\n timer.stop()\n metrics.log.debug(\n \"succeeded took: %s\",\n timer)\n except TransportError, e:\n if e.httpcode in (202,204):\n result = None\n else:\n log.error(self.last_sent())\n result = self.failed(binding, e)\n return result", "response": "Send a SOAP message to the server."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef headers(self):\n action = self.method.soap.action\n stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }\n result = dict(stock, **self.options.headers)\n log.debug('headers = %s', result)\n return result", 
"response": "Get http headers or the http https request."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nprocess the reply and return the result.", "response": "def succeeded(self, binding, reply):\n \"\"\"\n Request succeeded, process the reply\n @param binding: The binding to be used to process the reply.\n @type binding: L{bindings.binding.Binding}\n @param reply: The raw reply text.\n @type reply: str\n @return: The method result.\n @rtype: I{builtin}, L{Object}\n @raise WebFault: On server.\n \"\"\"\n log.debug('http succeeded:\\n%s', reply)\n plugins = PluginContainer(self.options.plugins)\n if len(reply) > 0:\n with LocalTimer() as lt:\n reply, result = binding.get_reply(self.method, reply)\n self.last_received(reply)\n metrics.log.debug(\"Calling binding.get_reply took: %.03f\" % lt.interval)\n else:\n result = None\n ctx = plugins.message.unmarshalled(reply=result)\n result = ctx.reply\n if self.options.faults:\n return result\n else:\n return (200, result)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef failed(self, binding, error):\n status, reason = (error.httpcode, tostr(error))\n reply = error.fp.read()\n log.debug('http failed:\\n%s', reply)\n if status == 500:\n if len(reply) > 0:\n r, p = binding.get_fault(reply)\n self.last_received(r)\n return (status, p)\n else:\n return (status, None)\n if self.options.faults:\n raise HttpWebFault(status, reason)\n else:\n return (status, None)", "response": "Process the response from the server based on the reason."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the line number parsed from the comment or 0.", "response": "def _get_line_no_from_comments(py_line):\n \"\"\"Return the line number parsed from the comment or 0.\"\"\"\n matched = LINECOL_COMMENT_RE.match(py_line)\n if matched:\n return int(matched.group(1))\n else:\n return 0"} {"SOURCE": "codesearchnet", 
"instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _find_bounds(py_line_no, py_by_line_no, cheetah_by_line_no):\n # Find lower bound\n for line_no in range(py_line_no, 0, -1):\n lower_bound = _get_line_no_from_comments(py_by_line_no[line_no])\n if lower_bound != 0:\n break\n else:\n lower_bound = 0\n\n # Find upper bound\n for line_no in range(py_line_no, len(py_by_line_no)):\n upper_bound = _get_line_no_from_comments(py_by_line_no[line_no])\n if upper_bound != 0:\n # Since we'll eventually be building a range(), let's make this\n # the non-inclusive upper-bound\n upper_bound += 1\n break\n else:\n upper_bound = len(cheetah_by_line_no)\n\n return lower_bound, upper_bound", "response": "Searches before and after in the python source to find comments which\n denote cheetah line numbers."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nattempt to fuzzily find matching lines.", "response": "def _find_fuzzy_line(\n py_line_no, py_by_line_no, cheetah_by_line_no, prefer_first\n):\n \"\"\"Attempt to fuzzily find matching lines.\"\"\"\n stripped_line = _fuzz_py_line(py_by_line_no[py_line_no])\n cheetah_lower_bound, cheetah_upper_bound = _find_bounds(\n py_line_no, py_by_line_no, cheetah_by_line_no,\n )\n\n sliced = list(enumerate(cheetah_by_line_no))[\n cheetah_lower_bound:cheetah_upper_bound\n ]\n if not prefer_first:\n sliced = reversed(sliced)\n\n for line_no, line in sliced:\n if stripped_line in _fuzz_cheetah_line(line):\n return line_no\n else:\n # We've failed to find a matching line\n return 0"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nperforming a step of the transformation.", "response": "def perform_step(file_contents, step):\n \"\"\"Performs a step of the transformation.\n\n :param text file_contents: Contends of the cheetah template\n :param function step: Function taking xmldoc and returning new contents\n :returns: new contents of the file.\n \"\"\"\n assert 
type(file_contents) is not bytes\n xmldoc = parse(file_contents)\n return step(xmldoc)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _get_sender(*sender_params, **kwargs):\n notify_func = kwargs['notify_func']\n with _sender_instances_lock:\n existing_sender = _sender_instances.get(sender_params, None)\n if existing_sender:\n sender = existing_sender\n sender._notify = notify_func\n else:\n sender = _Sender(*sender_params, notify=notify_func)\n _sender_instances[sender_params] = sender\n return sender", "response": "Utility function acting as a Sender factory - creates a new instance of _Sender if one is not already created"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nstopping all the active Senders and closes all the sockets.", "response": "def terminate():\n \"\"\"\n Stops all the active Senders by flushing the buffers and closing the\n underlying sockets\n \"\"\"\n with _sender_instances_lock:\n for sender_key, sender in _sender_instances.items():\n sender.close()\n _sender_instances.clear()"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nformats the original event into the expected Alooma format and adds the metadata to the event as a message field.", "response": "def _format_event(self, orig_event, external_metadata=None):\n \"\"\"\n Format the event to the expected Alooma format, packing it into a\n message field and adding metadata\n :param orig_event: The original event that was sent, should be\n dict, str or unicode.\n :param external_metadata: (Optional) a dict containing metadata to add\n to the event\n :return: a dict with the original event in a 'message'\n field and all the supplied metadata\n \"\"\"\n event_wrapper = {}\n\n # Add ISO6801 timestamp and frame info\n timestamp = datetime.datetime.utcnow().isoformat()\n event_wrapper[consts.WRAPPER_REPORT_TIME] = timestamp\n\n # Add the enclosing frame\n frame 
= inspect.currentframe().f_back.f_back\n filename = frame.f_code.co_filename\n line_number = frame.f_lineno\n event_wrapper[consts.WRAPPER_CALLING_FILE] = str(filename)\n event_wrapper[consts.WRAPPER_CALLING_LINE] = str(line_number)\n\n # Add the UUID to the event\n event_wrapper[consts.WRAPPER_UUID] = str(uuid.uuid4())\n\n # Try to set event type. If it throws, put the input label\n try:\n event_wrapper[consts.WRAPPER_EVENT_TYPE] = \\\n self._get_event_type(orig_event)\n except Exception:\n pass # The event type will be the input name, added by Alooma\n\n # Optionally add external metadata\n if external_metadata and isinstance(external_metadata, dict):\n event_wrapper.update(external_metadata)\n\n # Wrap the event with metadata\n event_wrapper[consts.WRAPPER_MESSAGE] = orig_event\n\n return json_dumps(event_wrapper)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreporting an event to Alooma.", "response": "def report(self, event, metadata=None, block=None):\n \"\"\"\n Reports an event to Alooma by formatting it properly and placing it in\n the buffer to be sent by the Sender instance\n :param event: A dict / string representing an event\n :param metadata: (Optional) A dict with extra metadata to be attached to\n the event\n :param block: (Optional) If True, the function will block the thread\n until the event buffer has space for the event.\n If False, reported events are discarded if the queue is\n full. 
Defaults to None, which uses the global `block`\n parameter given in the `init`.\n :return: True if the event was successfully enqueued, else False\n \"\"\"\n # Don't allow reporting if the underlying sender is terminated\n if self._sender.is_terminated:\n self._notify(logging.ERROR, consts.LOG_MSG_REPORT_AFTER_TERMINATION)\n return False\n\n # Send the event to the queue if it is a dict or a string.\n if isinstance(event, (dict,) + py2to3.basestring):\n formatted_event = self._format_event(event, metadata)\n\n should_block = block if block is not None else self.is_blocking\n return self._sender.enqueue_event(formatted_event, should_block)\n\n else: # Event is not a dict nor a string. Deny it.\n error_message = (consts.LOG_MSG_BAD_EVENT % (type(event), event))\n self._notify(logging.ERROR, error_message)\n return False"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreports all the given events to Alooma by formatting them properly and placing them in the buffer.", "response": "def report_many(self, event_list, metadata=None, block=None):\n \"\"\"\n Reports all the given events to Alooma by formatting them properly and\n placing them in the buffer to be sent by the Sender instance\n :param event_list: A list of dicts / strings representing events\n :param metadata: (Optional) A dict with extra metadata to be attached to\n the event\n :param block: (Optional) If True, the function will block the thread\n until the event buffer has space for the event.\n If False, reported events are discarded if the queue is\n full. Defaults to None, which uses the global `block`\n parameter given in the `init`.\n :return: A list with tuples, each containing a failed event\n and its original index. 
An empty list means success\n \"\"\"\n failed_list = []\n for index, event in enumerate(event_list):\n queued_successfully = self.report(event, metadata, block)\n if not queued_successfully:\n failed_list.append((index, event))\n return failed_list"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncalls the callback function and logs the message using the PySDK logger AttributeNames", "response": "def _notify(self, log_level, message):\n \"\"\"\n Calls the callback function and logs messages using the PySDK logger\n :param log_level: An integer representing the log level, as specified\n in the Python `logging` library\n :param message: The actual message to be sent to the logger and the\n `callback` function\n \"\"\"\n timestamp = datetime.datetime.utcnow()\n logger.log(log_level, str(message))\n try:\n self._callback(log_level, message, timestamp)\n except Exception as ex:\n logger.warning(consts.LOG_MSG_CALLBACK_FAILURE % str(ex))"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nchecks availability of the Alooma server and returns True if the server is reachable.", "response": "def _verify_connection(self):\n \"\"\"\n Checks availability of the Alooma server\n :return: If the server is reachable, returns True\n :raises: If connection fails, raises exceptions.ConnectionFailed\n \"\"\"\n try:\n res = self._session.get(self._connection_validation_url, json={})\n logger.debug(consts.LOG_MSG_VERIFYING_CONNECTION,\n self._connection_validation_url,\n res if res else 'No result from backend')\n if not res.ok:\n raise requests.exceptions.RequestException(res.content)\n remote_batch_size = res.json().get(consts.MAX_REQUEST_SIZE_FIELD,\n consts.DEFAULT_BATCH_SIZE)\n if remote_batch_size < self._batch_max_size:\n self._batch_max_size = remote_batch_size\n self._notify(logging.INFO,\n consts.LOG_MSG_NEW_BATCH_SIZE % remote_batch_size)\n self._is_connected.set()\n return True\n\n 
except requests.exceptions.RequestException as ex:\n msg = consts.LOG_MSG_CONNECTION_FAILED % str(ex)\n self._notify(logging.ERROR, msg)\n raise exceptions.ConnectionFailed(msg)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nverify the validity of the token against the remote server.", "response": "def _verify_token(self):\n \"\"\"\n Verifies the validity of the token against the remote server\n :return: True if the token is valid, else raises exceptions.BadToken\n \"\"\"\n res = self._session.get(self._token_verification_url)\n if not res.ok:\n raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsends a batch to the destination server.", "response": "def _send_batch(self, batch):\n \"\"\"\n Sends a batch to the destination server via HTTP REST API\n \"\"\"\n try:\n json_batch = '[' + ','.join(batch) + ']' # Make JSON array string\n logger.debug(consts.LOG_MSG_SENDING_BATCH, len(batch),\n len(json_batch), self._rest_url)\n res = self._session.post(self._rest_url, data=json_batch,\n headers=consts.CONTENT_TYPE_JSON)\n logger.debug(consts.LOG_MSG_BATCH_SENT_RESULT, res.status_code,\n res.content)\n if res.status_code == 400:\n self._notify(logging.CRITICAL, consts.LOG_MSG_BAD_TOKEN)\n raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)\n elif not res.ok:\n raise exceptions.SendFailed(\"Got bad response code - %s: %s\" % (\n res.status_code, res.content if res.content else 'No info'))\n except broken_pipe_errors as ex:\n self._is_connected.clear()\n raise exceptions.BatchTooBig(consts.LOG_MSG_BATCH_TOO_BIG % str(ex))\n except requests.exceptions.RequestException as ex:\n raise exceptions.SendFailed(str(ex))"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _sender_main(self):\n if not self._http_host:\n self._choose_host()\n last_batch_time = datetime.datetime.utcnow()\n\n while not 
(self._is_terminated.is_set() and self._event_queue.empty()):\n batch = None\n try:\n if not self._is_connected.is_set():\n self._verify_connection()\n\n batch = self._get_batch(last_batch_time)\n self._send_batch(batch)\n\n except exceptions.ConnectionFailed: # Failed to connect to server\n time.sleep(consts.NO_CONNECTION_SLEEP_TIME)\n self._is_connected.clear()\n\n except exceptions.EmptyBatch: # No events in queue, go to sleep\n time.sleep(consts.EMPTY_BATCH_SLEEP_TIME)\n\n except exceptions.SendFailed as ex: # Failed to send an event batch\n self._notify(ex.severity, str(ex))\n self._is_connected.clear()\n\n if batch: # Failed after pulling a batch from the queue\n self._enqueue_batch(batch)\n logger.debug(consts.LOG_MSG_ENQUEUED_FAILED_BATCH,\n len(batch))\n\n else: # We sent a batch successfully, server is reachable\n self._is_connected.set()\n\n finally: # Advance last batch time\n last_batch_time = datetime.datetime.utcnow()", "response": "Runs on a pysdk_sender_thread and handles sending events to the Alooma ArcGIS server."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef enqueue_event(self, event, block):\n try:\n self._event_queue.put_nowait(event)\n\n if self._notified_buffer_full: # Non-blocking and buffer was full\n self._notify(logging.WARNING, consts.LOG_MSG_BUFFER_FREED)\n self._notified_buffer_full = False\n\n except py2to3.queue.Full:\n if block: # Blocking - should block until space is freed\n self._event_queue.put(event)\n\n elif not self._notified_buffer_full: # Don't block, msg not emitted\n self._notify(logging.WARNING, consts.LOG_MSG_BUFFER_FULL)\n self._notified_buffer_full = True\n return False\n\n return True", "response": "Enqueue an event in the buffer."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nretrieve an event from the event queue.", "response": "def __get_event(self, block=True, timeout=1):\n \"\"\"\n Retrieves an 
event. If self._exceeding_event is not None, it'll be\n returned. Otherwise, an event is dequeued from the event buffer. If\n The event which was retrieved is bigger than the permitted batch size,\n it'll be omitted, and the next event in the event buffer is returned\n \"\"\"\n while True:\n if self._exceeding_event: # An event was omitted from last batch\n event = self._exceeding_event\n self._exceeding_event = None\n else: # No omitted event, get an event from the queue\n event = self._event_queue.get(block, timeout)\n\n event_size = len(event)\n\n # If the event is bigger than the permitted batch size, ignore it\n # The ( - 2 ) accounts for the parentheses enclosing the batch\n if event_size - 2 >= self._batch_max_size:\n self._notify(logging.WARNING,\n consts.LOG_MSG_OMITTED_OVERSIZED_EVENT\n % event_size)\n else: # Event is of valid size, return it\n return event"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_links(self, request=None):\n links = LinkNode()\n\n # Generate (path, method, view) given (path, method, callback).\n paths = []\n view_endpoints = []\n for path, method, callback in self.endpoints:\n view = self.create_view(callback, method, request)\n if getattr(view, 'exclude_from_schema', False):\n continue\n path = self.coerce_path(path, method, view)\n paths.append(path)\n view_endpoints.append((path, method, view))\n\n # Only generate the path prefix for paths that will be included\n if not paths:\n return None\n prefix = self.determine_path_prefix(paths)\n\n for path, method, view in view_endpoints:\n if not self.has_view_permissions(path, method, view):\n continue\n link = self.get_link(path, method, view, version=getattr(request, 'version', None))\n subpath = path[len(prefix):]\n keys = self.get_keys(subpath, method, view)\n try:\n insert_into(links, keys, link)\n except Exception:\n continue\n return links", "response": "Returns a dictionary containing all the links that 
should be included in the API schema."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_path_fields(self, path, method, view):\n model = getattr(getattr(view, 'queryset', None), 'model', None)\n fields = []\n\n for variable in uritemplate.variables(path):\n\n if variable == 'version':\n continue\n\n title = ''\n description = ''\n schema_cls = coreschema.String\n kwargs = {}\n if model is not None:\n # Attempt to infer a field description if possible.\n try:\n model_field = model._meta.get_field(variable)\n except:\n model_field = None\n\n if model_field is not None and model_field.verbose_name:\n title = force_text(model_field.verbose_name)\n\n if model_field is not None and model_field.help_text:\n description = force_text(model_field.help_text)\n elif model_field is not None and model_field.primary_key:\n description = get_pk_description(model, model_field)\n\n if hasattr(view, 'lookup_value_regex') and view.lookup_field == variable:\n kwargs['pattern'] = view.lookup_value_regex\n elif isinstance(model_field, models.AutoField):\n schema_cls = coreschema.Integer\n\n field = Field(\n name=variable,\n location='path',\n required=True,\n schema=schema_cls(title=title, description=description, **kwargs)\n )\n fields.append(field)\n\n return fields", "response": "Return a list of coreapi. 
Field instances corresponding to any templated path variables."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ntries to get the serializer class from view method.", "response": "def get_serializer_class(self, view, method_func):\n \"\"\"\n Try to get the serializer class from view method.\n If view method don't have request serializer, fallback to serializer_class on view class\n \"\"\"\n if hasattr(method_func, 'request_serializer'):\n return getattr(method_func, 'request_serializer')\n\n if hasattr(view, 'serializer_class'):\n return getattr(view, 'serializer_class')\n\n if hasattr(view, 'get_serializer_class'):\n return getattr(view, 'get_serializer_class')()\n\n return None"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef fallback_schema_from_field(self, field):\n title = force_text(field.label) if field.label else ''\n description = force_text(field.help_text) if field.help_text else ''\n\n # since we can't really inspect dictfield and jsonfield, at least display object as type\n # instead of string\n if isinstance(field, (serializers.DictField, serializers.JSONField)):\n return coreschema.Object(\n properties={},\n title=title,\n description=description\n )", "response": "Fallback schema for field that isn t inspected properly by DRF\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_serializer_fields(self, path, method, view, version=None, method_func=None):\n if method in ('PUT', 'PATCH', 'POST'):\n location = 'form'\n else:\n location = 'query'\n\n serializer_class = self.get_serializer_class(view, method_func)\n if not serializer_class:\n return []\n\n serializer = serializer_class()\n if isinstance(serializer, serializers.ListSerializer):\n return [\n Field(\n name='data',\n location=location,\n required=True,\n schema=coreschema.Array()\n )\n ]\n\n if not isinstance(serializer, 
serializers.Serializer):\n return []\n\n fields = []\n for field in serializer.fields.values():\n if field.read_only or isinstance(field, serializers.HiddenField):\n continue\n\n required = field.required and method != 'PATCH'\n # if the attribute ('help_text') of this field is a lazy translation object, force it to generate a string\n description = str(field.help_text) if isinstance(field.help_text, Promise) else field.help_text\n fallback_schema = self.fallback_schema_from_field(field)\n field = Field(\n name=field.field_name,\n location=location,\n required=required,\n schema=fallback_schema if fallback_schema else field_to_schema(field),\n description=description,\n )\n fields.append(field)\n\n return fields", "response": "Returns a list of coreapi. Field instances corresponding to any of the fields in the serializer."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.code = validated_data.get('code', instance.code)\n instance.linenos = validated_data.get('linenos', instance.linenos)\n instance.language = validated_data.get('language', instance.language)\n instance.style = validated_data.get('style', instance.style)\n instance.save()\n return instance", "response": "Update and return an existing Snippet instance given the validated data."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _generate_openapi_object(document):\n parsed_url = urlparse.urlparse(document.url)\n\n swagger = OrderedDict()\n\n swagger['swagger'] = '2.0'\n swagger['info'] = OrderedDict()\n swagger['info']['title'] = document.title\n swagger['info']['description'] = document.description\n swagger['info']['version'] = document.version\n\n if parsed_url.netloc:\n swagger['host'] = parsed_url.netloc\n if parsed_url.scheme:\n swagger['schemes'] = [parsed_url.scheme]\n\n 
swagger['paths'] = _get_paths_object(document)\n\n return swagger", "response": "Generates a dict that represents the OpenAPI spec."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _get_responses(link):\n template = link.response_schema\n template.update({'description': 'Success'})\n res = {200: template}\n res.update(link.error_status_codes)\n return res", "response": "Returns an OpenApi - compliant response object"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _get_parameters(link, encoding):\n parameters = []\n properties = {}\n required = []\n\n for field in link.fields:\n parser = OpenApiFieldParser(link, field)\n if parser.location == 'form':\n if encoding in ('multipart/form-data', 'application/x-www-form-urlencoded'):\n # 'formData' in swagger MUST be one of these media types.\n parameters.append(parser.as_parameter())\n else:\n # Expand coreapi fields with location='form' into a single swagger\n # parameter, with a schema containing multiple properties.\n properties[field.name] = parser.as_schema_property()\n if field.required:\n required.append(field.name)\n elif parser.location == 'body':\n parameters.append(parser.as_body_parameter(encoding))\n else:\n parameters.append(parser.as_parameter())\n\n if properties:\n parameter = {\n 'name': 'data',\n 'in': 'body',\n 'schema': {\n 'type': 'object',\n 'properties': properties\n }\n }\n if required:\n parameter['schema']['required'] = required\n parameters.append(parameter)\n\n return parameters", "response": "Generates Swagger Parameter Item object."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nbuilds the auth URI for the user.", "response": "def auth_uri(self, redirect_uri=None, scope=None, scope_delim=None, \n state=None, **kwargs):\n\n \"\"\" Builds the auth URI for the authorization endpoint\n\n :param scope: (optional) The `scope` 
parameter to pass for\n authorization. The format should match that expected by\n the provider (i.e. Facebook expects comma-delimited,\n while Google expects space-delimited)\n :param state: (optional) The `state` parameter to pass for\n authorization. If the provider follows the OAuth 2.0\n spec, this will be returned to your `redirect_uri` after\n authorization. Generally used for CSRF protection.\n :param **kwargs: Any other querystring parameters to be passed to the\n provider.\n \"\"\"\n kwargs.update({\n 'client_id': self.client_id,\n 'response_type': 'code',\n })\n\n if scope is not None:\n kwargs['scope'] = scope\n\n if state is not None:\n kwargs['state'] = state\n\n if redirect_uri is not None:\n kwargs['redirect_uri'] = redirect_uri\n\n return '%s?%s' % (self.auth_endpoint, urlencode(kwargs))"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef request_token(self, parser=None, redirect_uri=None, **kwargs):\n kwargs = kwargs and kwargs or {}\n\n parser = parser or _default_parser\n kwargs.update({\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'grant_type' in kwargs and kwargs['grant_type'] or \\\n 'authorization_code'\n })\n if redirect_uri is not None:\n kwargs.update({'redirect_uri': redirect_uri})\n\n # TODO: maybe raise an exception here if status code isn't 200?\n msg = urlopen(self.token_endpoint, urlencode(kwargs).encode(\n 'utf-8'))\n data = parser(msg.read().decode(msg.info().get_content_charset() or\n 'utf-8'))\n\n for key in data:\n setattr(self, key, data[key])\n\n # expires_in is RFC-compliant. 
if anything else is used by the\n # provider, token_expires must be set manually\n if hasattr(self, 'expires_in'):\n try:\n # python3 dosn't support long\n seconds = long(self.expires_in)\n except:\n seconds = int(self.expires_in)\n self.token_expires = mktime((datetime.utcnow() + timedelta(\n seconds=seconds)).timetuple())", "response": "Request an access token from the token endpoint."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nrequest user data from the resource endpoint.", "response": "def request(self, url, method=None, data=None, headers=None, parser=None): \n \"\"\" Request user data from the resource endpoint\n :param url: The path to the resource and querystring if required\n :param method: HTTP method. Defaults to ``GET`` unless data is not None\n in which case it defaults to ``POST``\n :param data: Data to be POSTed to the resource endpoint\n :param parser: Parser callback to deal with the returned data. Defaults\n to ``json.loads`.`\n \"\"\"\n assert self.access_token is not None\n parser = parser or loads \n\n if not method:\n method = 'GET' if not data else 'POST'\n\n req = self.token_transport('{0}{1}'.format(self.resource_endpoint, \n url), self.access_token, data=data, method=method, headers=headers)\n\n resp = urlopen(req)\n data = resp.read()\n try:\n return parser(data.decode(resp.info().get_content_charset() or\n 'utf-8'))\n # try to decode it first using either the content charset, falling\n # back to utf-8\n\n except UnicodeDecodeError:\n # if we've gotten a decoder error, the calling code better know how\n # to deal with it. some providers (i.e. 
stackexchange) like to gzip\n # their responses, so this allows the client code to handle it\n # directly.\n return parser(data)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef build_srcdict(gta, prop):\n o = {}\n for s in gta.roi.sources:\n o[s.name] = s[prop]\n return o", "response": "Build a dictionary that maps from source name to the value of a source property"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nbuilding and return a list of source name", "response": "def get_src_names(gta):\n \"\"\"Build and return a list of source name\n\n Parameters\n ----------\n\n gta : `fermipy.GTAnalysis`\n The analysis object\n\n\n Returns\n -------\n\n l : list\n Names of the source\n\n \"\"\"\n o = []\n for s in gta.roi.sources:\n o += [s.name]\n return sorted(o)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nsetting a weights file and get the weighted npred for all the sources", "response": "def set_wts_get_npred_wt(gta, maskname):\n \"\"\"Set a weights file and get the weighted npred for all the sources\n\n Parameters\n ----------\n\n gta : `fermipy.GTAnalysis`\n The analysis object\n\n maskname : str\n The path to the file with the mask\n\n\n Returns\n -------\n\n odict : dict\n Dictionary mapping from source name to weighted npred\n\n \"\"\"\n if is_null(maskname):\n maskname = None\n\n gta.set_weights_map(maskname)\n for name in gta.like.sourceNames():\n gta._init_source(name)\n gta._update_roi()\n return build_srcdict(gta, 'npred_wt')"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ntakes a snapshot of the ROI and make plots for the ROI.", "response": "def snapshot(gta, plotter, key, do_weighted=True, make_plots=True):\n \"\"\"Take a snapshot of the ROI\n\n Parameters\n ----------\n\n gta : `fermipy.GTAnalysis`\n The analysis object\n\n plotter : `fermipy.plotting.AnalysisPlotter`\n The 
object that makes the plots\n\n key : str\n Key for this snapshot, used to create filenames\n\n do_weighted : bool\n If True, include weighted version of outputs\n\n make_plots : bool\n If True, make plots\n\n \"\"\"\n gta.write_roi(key, save_model_map=True, make_plots=make_plots, save_weight_map=do_weighted)\n if make_plots:\n o = gta.residmap(key)\n plotter.make_residmap_plots(o, gta.roi)\n\n if do_weighted:\n gta.make_plots(\"%s_wt\"%key, weighted=True)\n o = gta.residmap(\"%s_wt\"%key, use_weights=True)\n plotter.make_residmap_plots(o, gta.roi)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncomparing two dictionarys of npreds and get the list of sources that have changed less that set thresholds", "response": "def get_unchanged(src_list, npred_dict_new,\n npred_dict_old,\n npred_threshold=1e4,\n frac_threshold=0.9):\n \"\"\"Compare two dictionarys of npreds, and get the list of sources\n than have changed less that set thresholds\n\n Parameters\n ----------\n\n src_list : list\n List of sources to examine\n\n npred_dict_new : dict\n Dictionary mapping source name to npred for the current weights file\n\n npred_dict_old : dict\n Dictionary mapping source name to npred for the previous weights file\n\n npred_threshold : float\n Minimum value of npred above which to consider sources changed\n\n frac_threshold : float\n Value of npred_old / npred_new above which to consider sources unchanged\n\n\n Returns\n -------\n\n l : list\n Names of 'unchanged' sources\n\n \"\"\"\n o = []\n for s in src_list:\n npred_new = npred_dict_new[s]\n if npred_new < npred_threshold:\n o += [s]\n continue\n if npred_dict_old is None:\n npred_old = 0.\n else:\n npred_old = npred_dict_old[s]\n frac = npred_old / npred_new\n if frac > frac_threshold:\n o += [s]\n return o"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef build_job_configs(self, args):\n job_configs = {}\n\n # Tweak the batch job 
args\n try:\n self._interface._lsf_args.update(dict(n=2))\n self._interface._lsf_args.update(dict(R='\\\"select[rhel60&&!fell] -R span[hosts=1]\\\"'))\n except AttributeError:\n pass\n\n models = load_yaml(args['models'])\n\n base_config = dict(fit_strategy=args['fit_strategy'],\n input_pars=args['input_pars'],\n load_baseline=args['load_baseline'],\n make_plots=args['make_plots'])\n\n for modelkey in models:\n config_file = os.path.join('analysis', 'model_%s' % modelkey,\n args['config'])\n #roi_baseline = os.path.join('analysis', 'model_%s' % modelkey,\n # args['roi_baseline'])\n roi_baseline = args['roi_baseline']\n logfile = os.path.join('analysis', 'model_%s' % modelkey,\n 'fit_%s.log' % modelkey)\n job_config = base_config.copy()\n job_config.update(dict(config=config_file,\n roi_baseline=roi_baseline,\n logfile=logfile))\n job_configs[modelkey] = job_config\n\n return job_configs", "response": "Hook to build the job configurations for the current batch job"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_native_default_args():\n native_default_args = dict(max_jobs=500,\n time_per_cycle=15,\n jobs_per_cycle=20,\n max_job_age=90,\n no_batch=False)\n return native_default_args.copy()", "response": "Get the correct set of batch jobs arguments."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nsend a single job to be executed", "response": "def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):\n \"\"\"Send a single job to be executed\n\n Parameters\n ----------\n\n link : `fermipy.jobs.chain.Link`\n The link used to invoke the command we are running\n\n key : str\n A string that identifies this particular instance of the job\n\n job_config : dict\n A dictionrary with the arguments for the job. 
Used with\n the self._command_template job template\n\n logfile : str\n The logfile for this job, may be used to check for success/ failure\n \"\"\"\n full_sub_dict = job_config.copy()\n\n full_command = \"%s >& %s\" % (\n link.command_template().format(**full_sub_dict), logfile)\n\n logdir = os.path.dirname(logfile)\n\n if self._dry_run:\n sys.stdout.write(\"%s\\n\" % full_command)\n else:\n try:\n os.makedirs(logdir)\n except OSError:\n pass\n os.system(full_command)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef log_level(level):\n\n levels_dict = {0: 50,\n 1: 40,\n 2: 30,\n 3: 20,\n 4: 10}\n\n if not isinstance(level, int):\n level = int(level)\n\n if level > 4:\n level = 4\n\n return levels_dict[level]", "response": "This is a function that returns a python like\n level from a HEASOFT like level."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncreate a python logger instance and configure it.", "response": "def configure(name, logfile, loglevel=logging.DEBUG):\n \"\"\"Create a python logger instance and configure it.\n\n Parameters\n ----------\n name : str\n Logger name.\n\n logfile : str\n Path to the log file.\n\n loglevel : int\n Default log level for STDOUT.\n \"\"\"\n\n # logging.config.dictConfig({\n # 'version': 1,\n # 'disable_existing_loggers': False})\n\n logger = logging.getLogger(name)\n\n # Don't propagate to root logger\n logger.propagate = False\n logger.setLevel(logging.DEBUG)\n\n datefmt = '%Y-%m-%d %H:%M:%S'\n format_stream = ('%(asctime)s %(levelname)-8s'\n '%(name)s.%(funcName)s(): %(message)s')\n format_file = ('%(asctime)s %(levelname)-8s'\n '%(name)s.%(funcName)s(): %(message)s')\n# format_file = ('%(asctime)s %(levelname)-8s '\n# '%(name)s.%(funcName)s() '\n# '[%(filename)s:%(lineno)d]: %(message)s')\n\n if not logger.handlers:\n\n # Add a file handler\n if logfile is not None:\n logfile = logfile.replace('.log', '') + '.log'\n fh = 
logging.FileHandler(logfile)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(logging.Formatter(format_file, datefmt))\n logger.addHandler(fh)\n\n # Add a stream handler\n ch = logging.StreamHandler()\n ch.setLevel(loglevel)\n ch.setFormatter(logging.Formatter(format_stream, datefmt))\n logger.addHandler(ch)\n else:\n logger.handlers[-1].setLevel(loglevel)\n\n return logger"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nextracts a set of arguments from a large dictionary.", "response": "def extract_arguments(args, defaults):\n \"\"\"Extract a set of arguments from a large dictionary\n\n Parameters\n ----------\n\n args : dict\n Dictionary with the arguments values to use\n\n defaults : dict\n Dictionary with all the argument to extract, and default values for each\n\n Returns\n -------\n\n out_dict : dict\n A dictionary with only the extracted arguments\n\n \"\"\"\n out_dict = convert_option_dict_to_dict(defaults)\n for key in defaults.keys():\n mapped_val = args.get(key, None)\n if mapped_val is None:\n pass\n else:\n out_dict[key] = mapped_val\n return out_dict"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef check_files(filelist,\n file_stage_manager=None,\n return_found=True,\n return_missing=True):\n \"\"\"Check that all files in a list exist\n\n Parameters\n ----------\n\n filelist : list\n The list of files we are checking for.\n\n file_stage_manager : `fermipy.jobs.file_archive.FileStageManager`\n A object that maps files to scratch space if needed.\n\n return_found : list\n A list with the paths of the files that were found.\n\n return_missing : list\n A list with the paths of the files that were missing.\n\n Returns\n -------\n found : list\n List of the found files, if requested, otherwise `None`\n\n missing : list\n List of the missing files, if requested, otherwise `None`\n\n \"\"\"\n found = []\n missing = []\n none_count = 0\n for fname in 
filelist:\n\n if fname is None:\n none_count += 1\n continue\n if fname[0] == '@':\n fname = fname[1:]\n if os.path.exists(fname):\n found.append(fname)\n continue\n if os.path.exists(fname + '.gz'):\n found.append(fname)\n continue\n if file_stage_manager is not None:\n fname = file_stage_manager.get_scratch_path(fname)\n if os.path.exists(fname):\n found.append(fname)\n continue\n missing.append(fname)\n if return_found and return_missing:\n return found, missing\n elif return_found:\n return found\n elif return_missing:\n return missing\n return None", "response": "Check that all files in a list exist in a scratch space."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef add_argument(parser, dest, info):\n default, helpstr, typeinfo = info\n\n if dest == 'args':\n parser.add_argument('args', nargs='+', default=None, help=helpstr)\n elif typeinfo == list:\n parser.add_argument('--%s' % dest, action='append', help=helpstr)\n elif typeinfo == bool:\n parser.add_argument('--%s' % dest, action='store_true', help=helpstr)\n else:\n parser.add_argument('--%s' % dest, action='store', type=typeinfo,\n default=default, help=helpstr)", "response": "Adds an argument to an argument parser object."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nconverting a simple key - value dictionary to a dictionary of options tuples", "response": "def convert_dict_to_option_dict(input_dict):\n \"\"\"Convert a simple key-value dictionary to a dictionary of options tuples\"\"\"\n ret_dict = {}\n for key, value in input_dict.items():\n ret_dict[key] = convert_value_to_option_tuple(value)\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nconverts a dictionary of options tuples to a simple key - value dictionary", "response": "def convert_option_dict_to_dict(option_dict):\n \"\"\"Convert a dictionary of options tuples to a simple key-value 
dictionary\"\"\"\n ret_dict = {}\n for key, value in option_dict.items():\n if is_null(value):\n ret_dict[key] = None\n elif isinstance(value, tuple):\n ret_dict[key] = value[0]\n else:\n ret_dict[key] = value\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreducing a dictionary by selecting a set of keys", "response": "def reduce_by_keys(orig_dict, keys, default=None):\n \"\"\"Reduce a dictionary by selecting a set of keys \"\"\"\n ret = {}\n for key in keys:\n ret[key] = orig_dict.get(key, default)\n return ret"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef construct_docstring(options):\n s = \"\\nParameters\\n\"\n s += \"----------\\n\\n\"\n for key, opt in options.items():\n s += \"%s : %s\\n %s [%s]\\n\" % (key, str(opt[2]),\n str(opt[1]), str(opt[0]))\n return s", "response": "Construct a docstring for a set of options"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nfill an argument parser with the options from this chain", "response": "def _fill_argparser(self, parser):\n \"\"\"Fill an `argparser.ArgumentParser` with the options from this chain\n \"\"\"\n for key, val in self._options.items():\n add_argument(parser, key, val)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ninitializing a link with a set of arguments using an argparser. 
ArgumentParser", "response": "def _run_argparser(self, argv):\n \"\"\"Initialize a link with a set of arguments using an `argparser.ArgumentParser`\n \"\"\"\n if self._parser is None:\n raise ValueError('Link was not given a parser on initialization')\n args = self._parser.parse_args(argv)\n self.update_args(args.__dict__)\n return args"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _latch_file_info(self):\n self.files.file_dict.clear()\n self.files.latch_file_info(self.args)", "response": "Internal function to update the dictionary holding the file info for this process"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nupdate a file dict with information from self", "response": "def _update_sub_file_dict(self, sub_files):\n \"\"\"Update a file dict with information from self\"\"\"\n sub_files.file_dict.clear()\n for job_details in self.jobs.values():\n if job_details.file_dict is not None:\n sub_files.update(job_details.file_dict)\n if job_details.sub_file_dict is not None:\n sub_files.update(job_details.sub_file_dict)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ndo some checks before running this link.", "response": "def _pre_run_checks(self, stream=sys.stdout, dry_run=False):\n \"\"\"Do some checks before running this link\n\n This checks if input and output files are present.\n\n If input files are missing this will raise `OSError` if dry_run is False\n If all output files are present this return False.\n\n Parameters\n -----------\n stream : `file`\n Stream that this function will print to,\n Must have 'write' function\n\n dry_run : bool\n Print command but do not run it\n\n Returns\n -------\n status : bool\n True if it is ok to proceed with running the link\n\n \"\"\"\n input_missing = self.check_input_files(return_found=False)\n if input_missing:\n if dry_run: \n stream.write(\"Input files are missing: %s: %i\\n\" %\n 
(self.linkname, len(input_missing)))\n else:\n print (self.args)\n raise OSError(\"Input files are missing: %s\" % input_missing)\n\n output_found, output_missing = self.check_output_files()\n if output_found and not output_missing:\n stream.write(\"All output files for %s already exist: %i %i %i\\n\" %\n (self.linkname, len(output_found),\n len(output_missing), len(self.files.output_files)))\n if dry_run:\n pass\n else:\n pass\n # return False\n return True"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _create_job_details(self, key, job_config, logfile, status):\n self.update_args(job_config)\n job_details = JobDetails(jobname=self.full_linkname,\n jobkey=key,\n appname=self.appname,\n logfile=logfile,\n job_config=job_config,\n timestamp=get_timestamp(),\n file_dict=copy.deepcopy(self.files),\n sub_file_dict=copy.deepcopy(self.sub_files),\n status=status)\n return job_details", "response": "Create a JobDetails object for a single job."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _map_scratch_files(self, file_dict):\n if self._file_stage is None:\n return ({}, {})\n input_files = file_dict.input_files_to_stage\n output_files = file_dict.output_files_to_stage\n input_file_mapping = self._file_stage.map_files(input_files)\n output_file_mapping = self._file_stage.map_files(output_files)\n self._update_file_args(input_file_mapping)\n self._update_file_args(output_file_mapping)\n return input_file_mapping, output_file_mapping", "response": "Build and return the mapping for copying files to and from scratch area"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nadjusting the arguments to deal with staging files to the scratch area", "response": "def _update_file_args(self, file_mapping):\n \"\"\"Adjust the arguments to deal with staging files to the scratch area\"\"\"\n for key, value in self.args.items():\n new_value = 
file_mapping.get(value, value)\n if new_value != value:\n self.args[key] = new_value"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _stage_input_files(self, file_mapping, dry_run=True):\n # print (\"Staging input \", file_mapping)\n if self._file_stage is None:\n return\n self._file_stage.copy_to_scratch(file_mapping, dry_run)", "response": "Stage the input files to the scratch area and adjust the arguments accordingly"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _stage_output_files(self, file_mapping, dry_run=True):\n # print (\"Staging output \", file_mapping)\n if self._file_stage is None:\n return\n self._file_stage.copy_from_scratch(file_mapping, dry_run)", "response": "Stage the output files to the scratch area and adjust the arguments accordingly"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _run_link(self, stream=sys.stdout, dry_run=False,\n stage_files=True, resubmit_failed=False):\n \"\"\"Internal function that actually runs this link.\n\n This checks if input and output files are present.\n\n If input files are missing this will raise `OSError` if dry_run is False\n If all output files are present this will skip execution.\n\n Parameters\n -----------\n stream : `file`\n Stream that this `Link` will print to,\n must have 'write' function.\n\n dry_run : bool\n Print command but do not run it.\n\n stage_files : bool\n Stage files to and from the scratch area.\n\n resubmit_failed : bool\n Resubmit failed jobs.\n \"\"\"\n check_ok = self._pre_run_checks(stream, dry_run)\n if not check_ok:\n return\n\n if self._file_stage is not None:\n input_file_mapping, output_file_mapping = self._map_scratch_files(\n self.files)\n if stage_files:\n self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)\n self._file_stage.make_scratch_dirs(\n output_file_mapping, dry_run)\n 
self._stage_input_files(input_file_mapping, dry_run)\n\n return_code = self.run_command(stream, dry_run)\n print (\"return code \", return_code)\n if return_code == 0:\n status = JobStatus.done\n if self._file_stage is not None and stage_files:\n self._stage_output_files(output_file_mapping, dry_run)\n self._finalize(dry_run)\n else:\n if resubmit_failed:\n print (\"Not resubmitting failed link %s\"%(self.linkname))\n status = JobStatus.failed\n if dry_run:\n return\n self._write_status_to_log(return_code, stream)\n self._set_status_self(status=status)", "response": "Internal function that actually runs the link."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating a JobDetails object for this link and add it to the self. jobs dictionary.", "response": "def _register_job(self, key, job_config, logfile, status):\n \"\"\"Create a `JobDetails` for this link\n and add it to the self.jobs dictionary.\n\n Parameters\n ----------\n\n key : str\n Key used to identify this particular job\n\n job_config : dict\n Dictionary with arguments passed to this particular job\n\n logfile : str\n Name of the associated log file\n\n status : int\n Current status of the job\n\n Returns\n -------\n job_details : `fermipy.jobs.JobDetails`\n Object with the details about this particular job.\n\n \"\"\"\n job_details = self._create_job_details(\n key, job_config, logfile, status)\n self.jobs[job_details.fullkey] = job_details\n return job_details"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nrunning this link captures output to logfile and records the job in self. 
jobs", "response": "def _register_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown):\n \"\"\"Runs this link, captures output to logfile,\n and records the job in self.jobs\"\"\"\n fullkey = JobDetails.make_fullkey(self.full_linkname, key)\n if fullkey in self.jobs:\n job_details = self.jobs[fullkey]\n job_details.status = status\n else:\n job_details = self._register_job(key, self.args, logfile, status)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _archive_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown):\n self._register_self(logfile, key, status)\n if self._job_archive is None:\n return\n self._job_archive.register_jobs(self.get_jobs())", "response": "Write info about a job run by this Link to the job archive"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _set_status_self(self, key=JobDetails.topkey, status=JobStatus.unknown):\n fullkey = JobDetails.make_fullkey(self.full_linkname, key)\n if fullkey in self.jobs:\n self.jobs[fullkey].status = status\n if self._job_archive:\n self._job_archive.register_job(self.jobs[fullkey])\n else:\n self._register_self('dummy.log', key, status)", "response": "Set the status of this job in the JobArchive if it is present."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _write_status_to_log(self, return_code, stream=sys.stdout):\n stream.write(\"Timestamp: %i\\n\" % get_timestamp())\n if return_code == 0:\n stream.write(\"%s\\n\" % self._interface.string_successful)\n else:\n stream.write(\"%s %i\\n\" %\n (self._interface.string_exited, return_code))", "response": "Write the status of this job to a log stream."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _finalize(self, dry_run=False):\n for rmfile in self.files.temp_files:\n if 
dry_run:\n print(\"remove %s\" % rmfile)\n else:\n os.remove(rmfile)\n for gzfile in self.files.gzip_files:\n if dry_run:\n # print (\"gzip %s\" % gzfile)\n pass\n else:\n os.system('gzip -9 %s' % gzfile)", "response": "Remove and compress files as requested"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef update_args(self, override_args):\n self.args = extract_arguments(override_args, self.args)\n self._latch_file_info()\n scratch_dir = self.args.get('scratch', None)\n if is_not_null(scratch_dir):\n self._file_stage = FileStageManager(scratch_dir, '.')", "response": "Update the arguments used to invoke the application."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a dictionary with the subset of jobs that are marked as failed and the jobs that are not pending.", "response": "def get_failed_jobs(self, fail_running=False, fail_pending=False):\n \"\"\"Return a dictionary with the subset of jobs that are marked as failed\n\n Parameters\n ----------\n fail_running : `bool`\n If True, consider running jobs as failed\n\n fail_pending : `bool`\n If True, consider pending jobs as failed\n\n Returns\n -------\n failed_jobs : dict\n Dictionary mapping from job key to `JobDetails` for the failed jobs.\n\n \"\"\"\n failed_jobs = {}\n for job_key, job_details in self.jobs.items():\n if job_details.status == JobStatus.failed:\n failed_jobs[job_key] = job_details\n elif job_details.status == JobStatus.partial_failed:\n failed_jobs[job_key] = job_details\n elif fail_running and job_details.status == JobStatus.running:\n failed_jobs[job_key] = job_details\n elif fail_pending and job_details.status <= JobStatus.pending:\n failed_jobs[job_key] = job_details\n return failed_jobs"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nchecking the status of a particular job in question.", "response": "def check_job_status(self, key=JobDetails.topkey,\n 
fail_running=False,\n fail_pending=False,\n force_check=False):\n \"\"\"Check the status of a particular job\n\n By default this checks the status of the top-level job, but\n can by made to drill into the sub-jobs.\n\n Parameters\n ----------\n key : str\n Key associated to the job in question\n\n fail_running : `bool`\n If True, consider running jobs as failed\n\n fail_pending : `bool`\n If True, consider pending jobs as failed\n\n force_check : `bool`\n Drill into status of individual jobs` instead of using top level job only\n\n Returns\n -------\n status : `JobStatus`\n Job status flag\n\n \"\"\"\n if key in self.jobs:\n status = self.jobs[key].status\n if status in [JobStatus.unknown, JobStatus.ready,\n JobStatus.pending, JobStatus.running] or force_check:\n status = self._interface.check_job(self.jobs[key])\n if status == JobStatus.running and fail_running:\n status = JobStatus.failed\n if status == JobStatus.pending and fail_pending:\n status = JobStatus.failed\n self.jobs[key].status = status\n if self._job_archive:\n self._job_archive.register_job(self.jobs[key])\n else:\n status = JobStatus.no_job\n\n return status"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncheck the status of all the jobs run from this link and return a status flag that summarizes that.", "response": "def check_jobs_status(self,\n fail_running=False,\n fail_pending=False):\n \"\"\"Check the status of all the jobs run from this link\n and return a status flag that summarizes that.\n\n Parameters\n ----------\n\n fail_running : `bool`\n If True, consider running jobs as failed\n\n fail_pending : `bool`\n If True, consider pending jobs as failed\n\n Returns\n -------\n status : `JobStatus`\n Job status flag that summarizes the status of all the jobs,\n\n \"\"\"\n\n n_failed = 0\n n_partial = 0\n n_passed = 0\n n_total = 0\n for job_details in self.jobs.values():\n n_total += 1\n if job_details.status in [JobStatus.failed, JobStatus.partial_failed]:\n 
n_failed += 1\n elif fail_running and job_details.status == JobStatus.running:\n n_failed += 1\n elif fail_pending and job_details.status == JobStatus.pending:\n n_failed += 1\n elif job_details.status == JobStatus.done:\n n_passed += 1\n\n if n_failed > 0:\n return JobStatus.failed\n elif n_passed == n_total:\n return JobStatus.done\n elif n_passed > 0:\n return JobStatus.running\n\n return JobStatus.pending"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a dictionary with all the jobs", "response": "def get_jobs(self, recursive=True):\n \"\"\"Return a dictionary with all the jobs\n\n For sub-classes, if recursive is True this will include jobs\n from any internal `Link`\n \"\"\"\n if recursive:\n ret_dict = self.jobs.copy()\n return ret_dict\n return self.jobs"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nchecking if input files exist.", "response": "def check_input_files(self,\n return_found=True,\n return_missing=True):\n \"\"\"Check if input files exist.\n\n Parameters\n ----------\n return_found : list\n A list with the paths of the files that were found.\n\n return_missing : list\n A list with the paths of the files that were missing.\n\n Returns\n -------\n found : list\n List of the found files, if requested, otherwise `None`\n\n missing : list\n List of the missing files, if requested, otherwise `None`\n\n \"\"\"\n all_input_files = self.files.chain_input_files + self.sub_files.chain_input_files\n return check_files(all_input_files, self._file_stage,\n return_found, return_missing)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef check_output_files(self,\n return_found=True,\n return_missing=True):\n \"\"\"Check if output files exist.\n\n Parameters\n ----------\n return_found : list\n A list with the paths of the files that were found.\n\n return_missing : list\n A list with the paths of the files that were missing.\n\n Returns\n 
-------\n found : list\n List of the found files, if requested, otherwise `None`\n\n missing : list\n List of the missing files, if requested, otherwise `None`\n\n \"\"\"\n all_output_files = self.files.chain_output_files + \\\n self.sub_files.chain_output_files\n return check_files(all_output_files, self._file_stage,\n return_found, return_missing)", "response": "Check if output files exist."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef missing_input_files(self):\n missing = self.check_input_files(return_found=False)\n ret_dict = {}\n for miss_file in missing:\n ret_dict[miss_file] = [self.linkname]\n return ret_dict", "response": "Make and return a dictionary of the missing input files. This returns a dictionary mapping the filepath to list of Link that use the file as input."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nmake and return a dictionary of the missing output files. 
This returns a dictionary mapping the filepath to list of links that produce the file as output.", "response": "def missing_output_files(self):\n \"\"\"Make and return a dictionary of the missing output files.\n\n This returns a dictionary mapping\n filepath to list of links that produce the file as output.\n \"\"\"\n missing = self.check_output_files(return_found=False)\n ret_dict = {}\n for miss_file in missing:\n ret_dict[miss_file] = [self.linkname]\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nbuilding and return the formatted command for this Link.", "response": "def formatted_command(self):\n \"\"\"Build and return the formatted command for this `Link`.\n\n This is exactly the command as called from the Unix command line.\n \"\"\"\n # FIXME, this isn't really great as it force you to have all the arguments\n command_template = self.command_template()\n format_dict = self.args.copy()\n\n for key, value in format_dict.items():\n # protect whitespace\n if isinstance(value, list):\n outstr = \"\"\n if key == 'args':\n outkey = \"\"\n else:\n outkey = \"--%s \"\n for lval in value:\n outstr += ' '\n outstr += outkey\n outstr += lval\n format_dict[key] = '\"%s\"' % outstr\n elif isinstance(value, str) and value.find(' ') >= 0 and key != 'args':\n format_dict[key] = '\"%s\"' % value\n elif value is None:\n format_dict[key] = 'none'\n\n command = command_template.format(**format_dict)\n return command"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef run_command(self, stream=sys.stdout, dry_run=False):\n command = self.formatted_command()\n if dry_run:\n stream.write(\"%s\\n\" % command)\n stream.flush()\n return 0\n proc = subprocess.Popen(command.split(),\n stderr=stream,\n stdout=stream)\n proc.communicate()\n return proc.returncode", "response": "Runs the command for this link."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in 
Python 3, explain what it does\ndef run_with_log(self, dry_run=False, stage_files=True, resubmit_failed=False):\n fullkey = JobDetails.make_fullkey(self.full_linkname)\n job_details = self.jobs[fullkey]\n odir = os.path.dirname(job_details.logfile)\n try:\n os.makedirs(odir)\n except OSError:\n pass\n ostream = open(job_details.logfile, 'w')\n self.run(ostream, dry_run, stage_files, resubmit_failed)", "response": "Runs this link with output sent to a pre - defined logfile."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nbuilds and return a string that can be used as a template invoking this chain from the command line.", "response": "def command_template(self):\n \"\"\"Build and return a string that can be used as a template invoking\n this chain from the command line.\n\n The actual command can be obtainted by using\n `self.command_template().format(**self.args)`\n \"\"\"\n com_out = self.appname\n arg_string = \"\"\n flag_string = \"\"\n # Loop over the key, value pairs in self.args\n\n for key, val in self.args.items():\n # Check if the value is set in self._options\n # If so, get the value from there\n if val is None:\n opt_val = self._options[key][0]\n else:\n opt_val = val\n opt_type = self._options[key][2]\n if key == 'args':\n # 'args' is special, pull it out and move it to the back\n arg_string += ' {%s}' % key\n elif opt_type is bool:\n if opt_val:\n flag_string += ' --%s' % (key)\n elif opt_type is list:\n if is_null(opt_val):\n continue\n elif isinstance(opt_val, str):\n com_out += ' --%s %s' % (key, opt_val)\n elif isinstance(opt_val, list):\n for arg_val in opt_val:\n com_out += ' --%s %s' % (key, arg_val)\n else:\n com_out += ' --%s {%s}' % (key, key)\n com_out += flag_string\n com_out += arg_string\n return com_out"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nprints a summary of the activity done by this Link.", "response": "def print_summary(self, 
stream=sys.stdout, indent=\"\", recurse_level=2):\n \"\"\"Print a summary of the activity done by this `Link`.\n\n Parameters\n -----------\n stream : `file`\n Stream to print to, must have 'write' method.\n\n indent : str\n Indentation at start of line\n\n recurse_level : int\n Number of recursion levels to print\n\n \"\"\"\n if recurse_level < 0:\n return\n stream.write(\"%sLink: %s\\n\" % (indent, self.linkname))\n stream.write(\"%sN_jobs: %s\\n\" % (indent, len(self.get_jobs())))\n self.sub_files.print_chain_summary(stream, indent)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef distance_to_edge(geom, skydir):\n\n # FIXME: We should add a pixel_size property in gammapy.maps\n # FIXME: We should make this into a MapGeom method \n \n xpix, ypix = skydir.to_pixel(geom.wcs, origin=0)\n deltax = np.array((xpix - geom.center_pix[0]) * geom._cdelt[0],\n ndmin=1)\n deltay = np.array((ypix - geom.center_pix[1]) * geom._cdelt[1],\n ndmin=1)\n\n deltax = np.abs(deltax) - 0.5 * geom.width[0]\n deltay = np.abs(deltay) - 0.5 * geom.width[1]\n\n m0 = (deltax < 0) & (deltay < 0)\n m1 = (deltax > 0) & (deltay < 0)\n m2 = (deltax < 0) & (deltay > 0)\n m3 = (deltax > 0) & (deltay > 0)\n mx = np.abs(deltax) <= np.abs(deltay)\n my = np.abs(deltay) < np.abs(deltax)\n\n delta = np.zeros(len(deltax))\n delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1]\n delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2]\n return delta", "response": "Return the angular distance from the given direction and\n the edge of the projection."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_wcs(skydir, coordsys='CEL', projection='AIT',\n cdelt=1.0, crpix=1., naxis=2, energies=None):\n \"\"\"Create a WCS object.\n\n Parameters\n ----------\n skydir : `~astropy.coordinates.SkyCoord`\n Sky coordinate of the WCS reference point.\n 
coordsys : str\n\n projection : str\n\n cdelt : float or (float,float)\n In the first case the same value is used for x and y axes\n crpix : float or (float,float)\n In the first case the same value is used for x and y axes\n naxis : {2, 3}\n Number of dimensions of the projection.\n energies : array-like\n Array of energies that defines the third dimension if naxis=3.\n \"\"\"\n\n w = WCS(naxis=naxis)\n\n if coordsys == 'CEL':\n w.wcs.ctype[0] = 'RA---%s' % (projection)\n w.wcs.ctype[1] = 'DEC--%s' % (projection)\n w.wcs.crval[0] = skydir.icrs.ra.deg\n w.wcs.crval[1] = skydir.icrs.dec.deg\n elif coordsys == 'GAL':\n w.wcs.ctype[0] = 'GLON-%s' % (projection)\n w.wcs.ctype[1] = 'GLAT-%s' % (projection)\n w.wcs.crval[0] = skydir.galactic.l.deg\n w.wcs.crval[1] = skydir.galactic.b.deg\n else:\n raise Exception('Unrecognized coordinate system.')\n\n try:\n w.wcs.crpix[0] = crpix[0]\n w.wcs.crpix[1] = crpix[1]\n except:\n w.wcs.crpix[0] = crpix\n w.wcs.crpix[1] = crpix\n\n try:\n w.wcs.cdelt[0] = cdelt[0]\n w.wcs.cdelt[1] = cdelt[1]\n except:\n w.wcs.cdelt[0] = -cdelt\n w.wcs.cdelt[1] = cdelt\n\n w = WCS(w.to_header())\n if naxis == 3 and energies is not None:\n w.wcs.crpix[2] = 1\n w.wcs.crval[2] = energies[0]\n w.wcs.cdelt[2] = energies[1] - energies[0]\n w.wcs.ctype[2] = 'Energy'\n w.wcs.cunit[2] = 'MeV'\n\n return w", "response": "Create a WCS object for a given skydir."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncopies a WCS object and add on the energy axis.", "response": "def wcs_add_energy_axis(wcs, energies):\n \"\"\"Copy a WCS object, and add on the energy axis.\n\n Parameters\n ----------\n wcs : `~astropy.wcs.WCS`\n WCS\n energies : array-like\n Array of energies.\n \"\"\"\n if wcs.naxis != 2:\n raise Exception(\n 'wcs_add_energy_axis, input WCS naxis != 2 %i' % wcs.naxis)\n w = WCS(naxis=3)\n w.wcs.crpix[0] = wcs.wcs.crpix[0]\n w.wcs.crpix[1] = wcs.wcs.crpix[1]\n w.wcs.ctype[0] = wcs.wcs.ctype[0]\n 
w.wcs.ctype[1] = wcs.wcs.ctype[1]\n w.wcs.crval[0] = wcs.wcs.crval[0]\n w.wcs.crval[1] = wcs.wcs.crval[1]\n w.wcs.cdelt[0] = wcs.wcs.cdelt[0]\n w.wcs.cdelt[1] = wcs.wcs.cdelt[1]\n w = WCS(w.to_header())\n w.wcs.crpix[2] = 1\n w.wcs.crval[2] = energies[0]\n w.wcs.cdelt[2] = energies[1] - energies[0]\n w.wcs.ctype[2] = 'Energy'\n return w"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef offset_to_sky(skydir, offset_lon, offset_lat,\n coordsys='CEL', projection='AIT'):\n \"\"\"Convert a cartesian offset (X,Y) in the given projection into\n a pair of spherical coordinates.\"\"\"\n\n offset_lon = np.array(offset_lon, ndmin=1)\n offset_lat = np.array(offset_lat, ndmin=1)\n\n w = create_wcs(skydir, coordsys, projection)\n pixcrd = np.vstack((offset_lon, offset_lat)).T\n\n return w.wcs_pix2world(pixcrd, 0)", "response": "Convert a cartesian offset in the given projection into a pair of spherical coordinates."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef sky_to_offset(skydir, lon, lat, coordsys='CEL', projection='AIT'):\n\n w = create_wcs(skydir, coordsys, projection)\n skycrd = np.vstack((lon, lat)).T\n\n if len(skycrd) == 0:\n return skycrd\n\n return w.wcs_world2pix(skycrd, 0)", "response": "Convert a sky coordinates to a projected offset."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef offset_to_skydir(skydir, offset_lon, offset_lat,\n coordsys='CEL', projection='AIT'):\n \"\"\"Convert a cartesian offset (X,Y) in the given projection into\n a SkyCoord.\"\"\"\n\n offset_lon = np.array(offset_lon, ndmin=1)\n offset_lat = np.array(offset_lat, ndmin=1)\n\n w = create_wcs(skydir, coordsys, projection)\n return SkyCoord.from_pixel(offset_lon, offset_lat, w, 0)", "response": "Convert a cartesian offset in the given projection into a SkyCoord."} {"SOURCE": "codesearchnet", "instruction": 
"Can you implement a function in Python 3 that\nconverts a SkyCoord object to a numpy array of pixel coordinates.", "response": "def skydir_to_pix(skydir, wcs):\n \"\"\"Convert skydir object to pixel coordinates.\n\n Gracefully handles 0-d coordinate arrays.\n\n Parameters\n ----------\n skydir : `~astropy.coordinates.SkyCoord`\n\n wcs : `~astropy.wcs.WCS`\n\n Returns\n -------\n xp, yp : `numpy.ndarray`\n The pixel coordinates\n\n \"\"\"\n if len(skydir.shape) > 0 and len(skydir) == 0:\n return [np.empty(0), np.empty(0)]\n\n return skydir.to_pixel(wcs, origin=0)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef pix_to_skydir(xpix, ypix, wcs):\n xpix = np.array(xpix)\n ypix = np.array(ypix)\n\n if xpix.ndim > 0 and len(xpix) == 0:\n return SkyCoord(np.empty(0), np.empty(0), unit='deg',\n frame='icrs')\n\n return SkyCoord.from_pixel(xpix, ypix, wcs,\n origin=0).transform_to('icrs')", "response": "Convert pixel coordinates to a skydir object."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ngenerate a sequence of bin edge vectors corresponding to the axes of a WCS object.", "response": "def wcs_to_axes(w, npix):\n \"\"\"Generate a sequence of bin edge vectors corresponding to the\n axes of a WCS object.\"\"\"\n\n npix = npix[::-1]\n\n x = np.linspace(-(npix[0]) / 2., (npix[0]) / 2.,\n npix[0] + 1) * np.abs(w.wcs.cdelt[0])\n y = np.linspace(-(npix[1]) / 2., (npix[1]) / 2.,\n npix[1] + 1) * np.abs(w.wcs.cdelt[1])\n\n if w.wcs.naxis == 2:\n return x, y\n\n cdelt2 = np.log10((w.wcs.cdelt[2] + w.wcs.crval[2]) / w.wcs.crval[2])\n\n z = (np.linspace(0, npix[2], npix[2] + 1)) * cdelt2\n z += np.log10(w.wcs.crval[2])\n\n return x, y, z"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ngenerates an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.", "response": "def wcs_to_coords(w, shape):\n 
\"\"\"Generate an N x D list of pixel center coordinates where N is\n the number of pixels and D is the dimensionality of the map.\"\"\"\n if w.naxis == 2:\n y, x = wcs_to_axes(w, shape)\n elif w.naxis == 3:\n z, y, x = wcs_to_axes(w, shape)\n else:\n raise Exception(\"Wrong number of WCS axes %i\" % w.naxis)\n\n x = 0.5 * (x[1:] + x[:-1])\n y = 0.5 * (y[1:] + y[:-1])\n\n if w.naxis == 2:\n x = np.ravel(np.ones(shape) * x[:, np.newaxis])\n y = np.ravel(np.ones(shape) * y[np.newaxis, :])\n return np.vstack((x, y))\n\n z = 0.5 * (z[1:] + z[:-1])\n x = np.ravel(np.ones(shape) * x[:, np.newaxis, np.newaxis])\n y = np.ravel(np.ones(shape) * y[np.newaxis, :, np.newaxis])\n z = np.ravel(np.ones(shape) * z[np.newaxis, np.newaxis, :])\n\n return np.vstack((x, y, z))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_cel_to_gal_angle(skydir):\n wcs0 = create_wcs(skydir, coordsys='CEL')\n wcs1 = create_wcs(skydir, coordsys='GAL')\n x, y = SkyCoord.to_pixel(SkyCoord.from_pixel(1.0, 0.0, wcs0), wcs1)\n return np.arctan2(y, x)", "response": "Calculate the rotation angle between the longitude\n axes of a local projection in celestial and galactic coordinates."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef extract_mapcube_region(infile, skydir, outfile, maphdu=0):\n\n h = fits.open(os.path.expandvars(infile))\n\n npix = 200\n shape = list(h[maphdu].data.shape)\n shape[1] = 200\n shape[2] = 200\n\n wcs = WCS(h[maphdu].header)\n skywcs = WCS(h[maphdu].header, naxis=[1, 2])\n coordsys = get_coordsys(skywcs)\n\n region_wcs = wcs.deepcopy()\n\n if coordsys == 'CEL':\n region_wcs.wcs.crval[0] = skydir.ra.deg\n region_wcs.wcs.crval[1] = skydir.dec.deg\n elif coordsys == 'GAL':\n region_wcs.wcs.crval[0] = skydir.galactic.l.deg\n region_wcs.wcs.crval[1] = skydir.galactic.b.deg\n else:\n raise Exception('Unrecognized coordinate system.')\n\n 
region_wcs.wcs.crpix[0] = npix // 2 + 0.5\n region_wcs.wcs.crpix[1] = npix // 2 + 0.5\n\n from reproject import reproject_interp\n data, footprint = reproject_interp(h, region_wcs.to_header(),\n hdu_in=maphdu,\n shape_out=shape)\n\n hdu_image = fits.PrimaryHDU(data, header=region_wcs.to_header())\n hdulist = fits.HDUList([hdu_image, h['ENERGIES']])\n hdulist.writeto(outfile, clobber=True)", "response": "Extract a region out of an all - sky mapcube file."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the angular distance from the given direction and the edge of the projection.", "response": "def distance_to_edge(self, skydir):\n \"\"\"Return the angular distance from the given direction and\n the edge of the projection.\"\"\"\n\n xpix, ypix = skydir.to_pixel(self.wcs, origin=0)\n deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0],\n ndmin=1)\n deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1],\n ndmin=1)\n\n deltax = np.abs(deltax) - 0.5 * self._width[0]\n deltay = np.abs(deltay) - 0.5 * self._width[1]\n\n m0 = (deltax < 0) & (deltay < 0)\n m1 = (deltax > 0) & (deltay < 0)\n m2 = (deltax < 0) & (deltay > 0)\n m3 = (deltax > 0) & (deltay > 0)\n mx = np.abs(deltax) <= np.abs(deltay)\n my = np.abs(deltay) < np.abs(deltax)\n\n delta = np.zeros(len(deltax))\n delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1]\n delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2]\n return delta"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef readlines(arg):\n fin = open(arg)\n lines_in = fin.readlines()\n fin.close()\n lines_out = []\n for line in lines_in:\n line = line.strip()\n if not line or line[0] == '#':\n continue\n lines_out.append(line)\n return lines_out", "response": "Read lines from a file into a list."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef 
create_inputlist(arglist):\n lines = []\n if isinstance(arglist, list):\n for arg in arglist:\n if os.path.splitext(arg)[1] == '.lst':\n lines += readlines(arg)\n else:\n lines.append(arg)\n elif is_null(arglist):\n pass\n else:\n if os.path.splitext(arglist)[1] == '.lst':\n lines += readlines(arglist)\n else:\n lines.append(arglist)\n return lines", "response": "Read lines from a file and makes a list of file names."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nloading into a histogram.", "response": "def create_hist(self, evclass, evtype, xsep, energy, ctheta,\n fill_sep=False, fill_evtype=False):\n \"\"\"Load into a histogram.\"\"\"\n\n nevt = len(evclass)\n ebin = utils.val_to_bin(self._energy_bins, energy)\n scale = self._psf_scale[ebin]\n\n vals = [energy, ctheta]\n bins = [self._energy_bins, self._ctheta_bins]\n\n if fill_sep:\n vals += [xsep]\n bins += [self._xsep_bins]\n\n if fill_evtype:\n loopv = [self._evclass_bins[:-1], self._evtype_bins[:-1]]\n shape = [16, 16] + [len(b) - 1 for b in bins]\n else:\n loopv = [self._evclass_bins[:-1]]\n shape = [16] + [len(b) - 1 for b in bins]\n\n h = np.zeros(shape)\n for t in itertools.product(*loopv):\n\n m = (evclass[:, int(t[0])] == True)\n if fill_evtype:\n m &= (evtype[:, int(t[1])] == True)\n if not np.sum(m):\n continue\n\n z = np.vstack(vals)\n z = z[:, m]\n\n if fill_evtype:\n h[int(t[0]), int(t[1])] += np.histogramdd(z.T, bins=bins)[0]\n else:\n h[int(t[0])] += np.histogramdd(z.T, bins=bins)[0]\n\n return h"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef calc_eff(self):\n\n hists = self.hists\n hists_out = self._hists_eff\n\n cth_axis_idx = dict(evclass=2, evtype=3)\n for k in ['evclass', 'evtype']:\n\n if k == 'evclass':\n ns0 = hists['evclass_on'][4][None, ...]\n nb0 = hists['evclass_off'][4][None, ...]\n else:\n ns0 = hists['evclass_on'][4][None, None, ...]\n nb0 = hists['evclass_off'][4][None, None, ...]\n\n eff, 
eff_var = calc_eff(ns0, nb0,\n hists['%s_on' % k], hists['%s_off' % k],\n hists['%s_alpha' % k])\n hists_out['%s_cth_eff' % k] = eff\n hists_out['%s_cth_eff_var' % k] = eff_var\n\n eff, eff_var = calc_eff(ns0, nb0,\n hists['%s_on' % k], hists['%s_off' % k],\n hists['%s_alpha' % k],\n sum_axes=[cth_axis_idx[k]])\n hists_out['%s_eff' % k] = np.squeeze(eff)\n hists_out['%s_eff_var' % k] = np.squeeze(eff_var)", "response": "Calculate the efficiency of the resource resource."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncreate a configuration dictionary from a schema dictionary.", "response": "def create_default_config(schema):\n \"\"\"Create a configuration dictionary from a schema dictionary.\n The schema defines the valid configuration keys and their default\n values. Each element of ``schema`` should be a tuple/list\n containing (default value,docstring,type) or a dict containing a\n nested schema.\"\"\"\n\n o = {}\n for key, item in schema.items():\n\n if isinstance(item, dict):\n o[key] = create_default_config(item)\n elif isinstance(item, tuple):\n\n value, comment, item_type = item\n\n if isinstance(item_type, tuple):\n item_type = item_type[0]\n\n if value is None and (item_type == list or item_type == dict):\n value = item_type()\n\n if key in o:\n raise KeyError('Duplicate key in schema.')\n\n o[key] = value\n else:\n raise TypeError('Unrecognized type for schema dict element: %s %s' %\n (key, type(item)))\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef update_from_schema(cfg, cfgin, schema):\n cfgout = copy.deepcopy(cfg)\n for k, v in schema.items():\n\n if k not in cfgin:\n continue\n if isinstance(v, dict):\n cfgout.setdefault(k, {})\n cfgout[k] = update_from_schema(cfg[k], cfgin[k], v)\n elif v[2] is dict:\n cfgout[k] = utils.merge_dict(cfg[k], cfgin[k], add_new_keys=True)\n else:\n cfgout[k] = cfgin[k]\n\n return cfgout", "response": "Update the contents of cfg with 
the contents of cfgin using the schema dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef write_config(self, outfile):\n utils.write_yaml(self.config, outfile, default_flow_style=False)", "response": "Write the configuration dictionary to an output file."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a configuration dictionary from a yaml config file.", "response": "def create(cls, configfile):\n \"\"\"Create a configuration dictionary from a yaml config file.\n This function will first populate the dictionary with defaults\n taken from pre-defined configuration files. The configuration\n dictionary is then updated with the user-defined configuration\n file. Any settings defined by the user will take precedence\n over the default settings.\"\"\"\n\n # populate config dictionary with an initial set of values\n # config_logging = ConfigManager.load('logging.yaml')\n\n config = {}\n if config['fileio']['outdir'] is None:\n config['fileio']['outdir'] = os.path.abspath(\n os.path.dirname(configfile))\n\n user_config = cls.load(configfile)\n config = utils.merge_dict(config, user_config, True)\n\n config['fileio']['outdir'] = os.path.abspath(\n config['fileio']['outdir'])\n\n return config"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update_null_primary(hdu_in, hdu=None):\n if hdu is None:\n hdu = fits.PrimaryHDU(header=hdu_in.header)\n else:\n hdu = hdu_in\n hdu.header.remove('FILENAME')\n return hdu", "response": "Update a null primary HDU"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nupdates a primary HDU", "response": "def update_primary(hdu_in, hdu=None):\n \"\"\" 'Update' a primary HDU\n\n This checks hdu exists and creates it from hdu_in if it does not.\n If hdu does exist, this adds the data in hdu_in to hdu\n 
\"\"\"\n if hdu is None:\n hdu = fits.PrimaryHDU(data=hdu_in.data, header=hdu_in.header)\n else:\n hdu.data += hdu_in.data\n return hdu"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef update_image(hdu_in, hdu=None):\n if hdu is None:\n hdu = fits.ImageHDU(\n data=hdu_in.data, header=hdu_in.header, name=hdu_in.name)\n else:\n hdu.data += hdu_in.data\n return hdu", "response": "Update an image HDU\n "} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef update_ebounds(hdu_in, hdu=None):\n if hdu is None:\n hdu = fits.BinTableHDU(\n data=hdu_in.data, header=hdu_in.header, name=hdu_in.name)\n else:\n for col in ['CHANNEL', 'E_MIN', 'E_MAX']:\n if (hdu.data[col] != hdu_in.data[col]).any():\n raise ValueError(\"Energy bounds do not match : %s %s\" %\n (hdu.data[col], hdu_in.data[col]))\n return hdu", "response": "Update the EBOUNDS HDUHDU\n "} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nmerges together all the GTI data into one.", "response": "def merge_all_gti_data(datalist_in, nrows, first):\n \"\"\" Merge together all the GTI data\n\n Parameters\n -------\n datalist_in : list of `astropy.io.fits.BinTableHDU` data\n The GTI data that is being merged\n\n nrows : `~numpy.ndarray` of ints\n Array with the number of nrows for each object in datalist_in\n\n first : `astropy.io.fits.BinTableHDU`\n BinTableHDU to use as a template\n\n Returns\n -------\n out_hdu : `astropy.io.fits.BinTableHDU`\n BinTableHDU with the merge GTIs\n\n \"\"\"\n max_row = nrows.cumsum()\n min_row = max_row - nrows\n out_hdu = fits.BinTableHDU.from_columns(\n first.columns, header=first.header, nrows=nrows.sum())\n\n for (imin, imax, data_in) in zip(min_row, max_row, datalist_in):\n for col in first.columns:\n out_hdu.data[col.name][imin:imax] = data_in[col.name]\n\n return out_hdu"} {"SOURCE": "codesearchnet", "instruction": "Explain what 
the following Python 3 code does\ndef extract_gti_data(hdu_in):\n data = hdu_in.data\n exposure = hdu_in.header['EXPOSURE']\n tstop = hdu_in.header['TSTOP']\n return (data, exposure, tstop)", "response": "Extract some GTI related data from a FITS table HDU"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update_hpx_skymap_allsky(map_in, map_out):\n if map_out is None:\n in_hpx = map_in.hpx\n out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,\n None, in_hpx.ebins, None, in_hpx.conv, None)\n data_out = map_in.expanded_counts_map()\n print(data_out.shape, data_out.sum())\n map_out = HpxMap(data_out, out_hpx)\n else:\n map_out.data += map_in.expanded_counts_map()\n return map_out", "response": "Update a HEALPix skymap from map_in to map_out"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nmerging all the files in filelist assuming that they WCS counts cubes are present in the output file.", "response": "def merge_wcs_counts_cubes(filelist):\n \"\"\" Merge all the files in filelist, assuming that they WCS counts cubes\n \"\"\"\n out_prim = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n for i, filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_primary(fin[0], out_prim)\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n date_end = fin[0].header['DATE-END']\n else:\n fin.close()\n\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n\n hdulist = [out_prim, out_ebounds, out_gti]\n for hdu 
in hdulist:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n return fits.HDUList(hdulist)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmerges all the files in filelist assuming that they are HEALPix counts cubes.", "response": "def merge_hpx_counts_cubes(filelist):\n \"\"\" Merge all the files in filelist, assuming that they HEALPix counts cubes\n \"\"\"\n out_prim = None\n out_skymap = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n out_name = None\n\n for i, filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_null_primary(fin[0], out_prim)\n out_name = fin[1].name\n\n map_in = HpxMap.create_from_hdulist(fin)\n out_skymap = update_hpx_skymap_allsky(map_in, out_skymap)\n if i == 0:\n try:\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n except KeyError:\n out_ebounds = update_energies(fin[\"ENERGIES\"], out_ebounds)\n try:\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n except KeyError:\n pass\n\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n try:\n date_end = fin[0].header['DATE-END']\n except KeyError:\n date_end = None\n else:\n fin.close()\n\n out_skymap_hdu = out_skymap.create_image_hdu(\"SKYMAP\")\n\n hdulist = [out_prim, out_skymap_hdu, out_ebounds]\n\n if len(datalist_gti) > 0:\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n hdulist.append(out_gti)\n\n for hdu in hdulist:\n if date_end:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n\n return fits.HDUList(hdulist)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a 
function in Python 3 to\nmake all the xml files for individual catalogs and components.", "response": "def _make_xml_files(catalog_info_dict, comp_info_dict):\n \"\"\"Make all the xml file for individual components\n \"\"\"\n for val in catalog_info_dict.values():\n val.roi_model.write_xml(val.srcmdl_name)\n\n for val in comp_info_dict.values():\n for val2 in val.values():\n val2.roi_model.write_xml(val2.srcmdl_name)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef build_job_configs(self, args):\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n if self._comp_dict is None or self._comp_dict_file != args['library']:\n self._comp_dict_file = args['library']\n self._comp_dict = make_catalog_comp_dict(sources=self._comp_dict_file,\n basedir=NAME_FACTORY.base_dict['basedir'])\n else:\n print (\"Using cached catalog dict from %s\" % args['library'])\n\n catalog_info_dict = self._comp_dict['catalog_info_dict']\n comp_info_dict = self._comp_dict['comp_info_dict']\n\n n_src_per_job = args['nsrc']\n\n if args['make_xml']:\n SrcmapsCatalog_SG._make_xml_files(catalog_info_dict, comp_info_dict)\n\n for catalog_name, catalog_info in catalog_info_dict.items():\n\n n_cat_src = len(catalog_info.catalog.table)\n n_job = int(math.ceil(float(n_cat_src) / n_src_per_job))\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}')\n name_keys = dict(zcut=zcut,\n sourcekey=catalog_name,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime='none',\n fullpath=True)\n\n for i_job in range(n_job):\n full_key = \"%s_%02i\" % (key, i_job)\n srcmin = i_job * n_src_per_job\n srcmax = min(srcmin + n_src_per_job, n_cat_src)\n outfile = NAME_FACTORY.srcmaps(\n **name_keys).replace('.fits', \"_%02i.fits\" % (i_job))\n logfile = 
make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[full_key] = dict(cmap=NAME_FACTORY.ccube(**name_keys),\n expcube=NAME_FACTORY.ltcube(**name_keys),\n irfs=NAME_FACTORY.irfs(**name_keys),\n bexpmap=NAME_FACTORY.bexpcube(**name_keys),\n outfile=outfile,\n logfile=logfile,\n srcmdl=catalog_info.srcmdl_name,\n evtype=comp.evtype,\n srcmin=srcmin,\n srcmax=srcmax)\n\n return job_configs", "response": "Hook to build job configurations from the given arguments"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if targets_yaml is None:\n return job_configs\n\n targets = load_yaml(targets_yaml)\n\n for target_name, target_list in targets.items():\n for targ_prof in target_list:\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n profile=targ_prof,\n fullpath=True)\n targ_key = \"%s_%s\" % (target_name, targ_prof)\n input_path = NAME_FACTORY.sedfile(**name_keys)\n output_path = input_path.replace('.fits', '.png')\n logfile = make_nfs_path(input_path.replace('.fits', '.log'))\n job_config = dict(infile=input_path,\n outfile=output_path,\n logfile=logfile)\n job_configs[targ_key] = job_config\n\n return job_configs", "response": "Hook to build job configurations based on the input files and target files."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngenerating a spectral energy distribution (SED) for a source. This function will fit the normalization of the source in each energy bin. By default the SED will be generated with the analysis energy bins but a custom binning can be defined with the ``loge_bins`` parameter. Parameters ---------- name : str Source name. prefix : str Optional string that will be prepended to all output files (FITS and rendered images). 
loge_bins : `~numpy.ndarray` Sequence of energies in log10(E/MeV) defining the edges of the energy bins. If this argument is None then the analysis energy bins will be used. The energies in this sequence must align with the bin edges of the underyling analysis instance. {options} optimizer : dict Dictionary that overrides the default optimizer settings. Returns ------- sed : dict Dictionary containing output of the SED analysis.", "response": "def sed(self, name, **kwargs):\n \"\"\"Generate a spectral energy distribution (SED) for a source. This\n function will fit the normalization of the source in each\n energy bin. By default the SED will be generated with the\n analysis energy bins but a custom binning can be defined with\n the ``loge_bins`` parameter.\n\n Parameters\n ----------\n name : str\n Source name.\n\n prefix : str\n Optional string that will be prepended to all output files\n (FITS and rendered images).\n\n loge_bins : `~numpy.ndarray`\n Sequence of energies in log10(E/MeV) defining the edges of\n the energy bins. If this argument is None then the\n analysis energy bins will be used. 
The energies in this\n sequence must align with the bin edges of the underyling\n analysis instance.\n\n {options}\n\n optimizer : dict\n Dictionary that overrides the default optimizer settings.\n\n Returns\n -------\n sed : dict\n Dictionary containing output of the SED analysis.\n\n \"\"\"\n timer = Timer.create(start=True)\n name = self.roi.get_source_by_name(name).name\n\n # Create schema for method configuration\n schema = ConfigSchema(self.defaults['sed'],\n optimizer=self.defaults['optimizer'])\n schema.add_option('prefix', '')\n schema.add_option('outfile', None, '', str)\n schema.add_option('loge_bins', None, '', list)\n config = utils.create_dict(self.config['sed'],\n optimizer=self.config['optimizer'])\n config = schema.create_config(config, **kwargs)\n\n self.logger.info('Computing SED for %s' % name)\n\n o = self._make_sed(name, **config)\n\n self.logger.info('Finished SED')\n\n outfile = config.get('outfile', None)\n if outfile is None:\n outfile = utils.format_filename(self.workdir, 'sed',\n prefix=[config['prefix'],\n name.lower().replace(' ', '_')])\n else:\n outfile = os.path.join(self.workdir,\n os.path.splitext(outfile)[0])\n\n o['file'] = None\n if config['write_fits']:\n o['file'] = os.path.basename(outfile) + '.fits'\n self._make_sed_fits(o, outfile + '.fits', **config)\n\n if config['write_npy']:\n np.save(outfile + '.npy', o)\n\n if config['make_plots']:\n self._plotter.make_sed_plots(o, **config)\n\n self.logger.info('Execution time: %.2f s', timer.elapsed_time)\n return o"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsaving the ROI model as an XML", "response": "def _write_xml(xmlfile, srcs):\n \"\"\"Save the ROI model as an XML \"\"\"\n root = ElementTree.Element('source_library')\n root.set('title', 'source_library')\n\n for src in srcs:\n src.write_xml(root)\n\n output_file = open(xmlfile, 'w')\n output_file.write(utils.prettify_xml(root))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 
function that can\nhandle a component dictionary and write the xml for the component.", "response": "def _handle_component(sourcekey, comp_dict):\n \"\"\"Make the source objects and write the xml for a component\n \"\"\"\n if comp_dict.comp_key is None:\n fullkey = sourcekey\n else:\n fullkey = \"%s_%s\" % (sourcekey, comp_dict.comp_key)\n srcdict = make_sources(fullkey, comp_dict)\n if comp_dict.model_type == 'IsoSource':\n print(\"Writing xml for %s to %s: %s %s\" % (fullkey,\n comp_dict.srcmdl_name,\n comp_dict.model_type,\n comp_dict.Spectral_Filename))\n elif comp_dict.model_type == 'MapCubeSource':\n print(\"Writing xml for %s to %s: %s %s\" % (fullkey,\n comp_dict.srcmdl_name,\n comp_dict.model_type,\n comp_dict.Spatial_Filename))\n SrcmapsDiffuse_SG._write_xml(comp_dict.srcmdl_name, srcdict.values())"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _make_xml_files(diffuse_comp_info_dict):\n try:\n os.makedirs('srcmdls')\n except OSError:\n pass\n\n for sourcekey in sorted(diffuse_comp_info_dict.keys()):\n comp_info = diffuse_comp_info_dict[sourcekey]\n if comp_info.components is None:\n SrcmapsDiffuse_SG._handle_component(sourcekey, comp_info)\n else:\n for sub_comp_info in comp_info.components.values():\n SrcmapsDiffuse_SG._handle_component(sourcekey, sub_comp_info)", "response": "Make all the xml files for all the components in the diffuse_comp_info_dict."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nhooking to build job configurations from the given arguments.", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n ret_dict = make_diffuse_comp_info_dict(components=components,\n library=args['library'],\n basedir='.')\n diffuse_comp_info_dict = ret_dict['comp_info_dict']\n if 
args['make_xml']:\n SrcmapsDiffuse_SG._make_xml_files(diffuse_comp_info_dict)\n\n for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()):\n diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key]\n no_psf = diffuse_comp_info_value.no_psf\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}')\n if diffuse_comp_info_value.components is None:\n sub_comp_info = diffuse_comp_info_value\n else:\n sub_comp_info = diffuse_comp_info_value.get_component_info(comp)\n name_keys = dict(zcut=zcut,\n sourcekey=sub_comp_info.sourcekey,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n mktime='none',\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n fullpath=True)\n\n kmin = 0\n kmax = comp.enumbins + 1\n outfile_base = NAME_FACTORY.srcmaps(**name_keys)\n kstep = HPX_ORDER_TO_KSTEP[comp.hpx_order]\n base_dict = dict(cmap=NAME_FACTORY.ccube(**name_keys),\n expcube=NAME_FACTORY.ltcube(**name_keys),\n irfs=NAME_FACTORY.irfs(**name_keys),\n bexpmap=NAME_FACTORY.bexpcube(**name_keys),\n srcmdl=sub_comp_info.srcmdl_name,\n source=sub_comp_info.source_name,\n no_psf=no_psf,\n evtype=comp.evtype)\n\n if kstep < 0:\n kstep = kmax\n else:\n pass\n\n for k in range(kmin, kmax, kstep):\n full_key = \"%s_%s_%02i\" % (diffuse_comp_info_key, key, k)\n khi = min(kmax, k + kstep)\n\n full_dict = base_dict.copy()\n outfile = outfile_base.replace('.fits', '_%02i.fits' % k)\n logfile = make_nfs_path(outfile_base.replace('.fits', '_%02i.log' % k))\n full_dict.update(dict(outfile=outfile,\n kmin=k, kmax=khi,\n logfile=logfile))\n job_configs[full_key] = full_dict\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef fit_error_ellipse(tsmap, xy=None, dpix=3, zmin=None):\n\n if xy is None:\n ix, iy = np.unravel_index(np.argmax(tsmap.data.T),\n tsmap.data.T.shape)\n else:\n ix, iy = xy\n\n pbfit0 = utils.fit_parabola(tsmap.data.T, 
ix, iy, dpix=1.5)\n pbfit1 = utils.fit_parabola(tsmap.data.T, ix, iy, dpix=dpix,\n zmin=zmin)\n\n wcs = tsmap.geom.wcs\n cdelt0 = tsmap.geom.wcs.wcs.cdelt[0]\n cdelt1 = tsmap.geom.wcs.wcs.cdelt[1]\n npix0 = tsmap.data.T.shape[0]\n npix1 = tsmap.data.T.shape[1]\n\n o = {}\n o['fit_success'] = pbfit0['fit_success']\n o['fit_inbounds'] = True\n\n if pbfit0['fit_success']:\n o['xpix'] = pbfit0['x0']\n o['ypix'] = pbfit0['y0']\n o['zoffset'] = pbfit0['z0']\n else:\n o['xpix'] = float(ix)\n o['ypix'] = float(iy)\n o['zoffset'] = tsmap.data.T[ix, iy]\n\n if pbfit1['fit_success']:\n sigmax = 2.0**0.5 * pbfit1['sigmax'] * np.abs(cdelt0)\n sigmay = 2.0**0.5 * pbfit1['sigmay'] * np.abs(cdelt1)\n theta = pbfit1['theta']\n sigmax = min(sigmax, np.abs(2.0 * npix0 * cdelt0))\n sigmay = min(sigmay, np.abs(2.0 * npix1 * cdelt1))\n elif pbfit0['fit_success']:\n sigmax = 2.0**0.5 * pbfit0['sigmax'] * np.abs(cdelt0)\n sigmay = 2.0**0.5 * pbfit0['sigmay'] * np.abs(cdelt1)\n theta = pbfit0['theta']\n sigmax = min(sigmax, np.abs(2.0 * npix0 * cdelt0))\n sigmay = min(sigmay, np.abs(2.0 * npix1 * cdelt1))\n else:\n pix_area = np.abs(cdelt0) * np.abs(cdelt1)\n mask = get_region_mask(tsmap.data, 1.0, (ix, iy))\n area = np.sum(mask) * pix_area\n sigmax = (area / np.pi)**0.5\n sigmay = (area / np.pi)**0.5\n theta = 0.0\n\n if (o['xpix'] <= 0 or o['xpix'] >= npix0 - 1 or\n o['ypix'] <= 0 or o['ypix'] >= npix1 - 1):\n o['fit_inbounds'] = False\n o['xpix'] = float(ix)\n o['ypix'] = float(iy)\n\n o['peak_offset'] = np.sqrt((float(ix) - o['xpix'])**2 +\n (float(iy) - o['ypix'])**2)\n\n skydir = SkyCoord.from_pixel(o['xpix'], o['ypix'], wcs)\n sigma = (sigmax * sigmay)**0.5\n r68 = 2.30**0.5 * sigma\n r95 = 5.99**0.5 * sigma\n r99 = 9.21**0.5 * sigma\n\n if sigmax < sigmay:\n o['pos_err_semimajor'] = sigmay\n o['pos_err_semiminor'] = sigmax\n o['theta'] = np.fmod(2 * np.pi + np.pi / 2. 
+ theta, np.pi)\n else:\n o['pos_err_semimajor'] = sigmax\n o['pos_err_semiminor'] = sigmay\n o['theta'] = np.fmod(2 * np.pi + theta, np.pi)\n\n o['pos_angle'] = np.degrees(o['theta'])\n o['pos_err'] = sigma\n o['pos_r68'] = r68\n o['pos_r95'] = r95\n o['pos_r99'] = r99\n o['ra'] = skydir.icrs.ra.deg\n o['dec'] = skydir.icrs.dec.deg\n o['glon'] = skydir.galactic.l.deg\n o['glat'] = skydir.galactic.b.deg\n a = o['pos_err_semimajor']\n b = o['pos_err_semiminor']\n\n o['pos_ecc'] = np.sqrt(1 - b**2 / a**2)\n o['pos_ecc2'] = np.sqrt(a**2 / b**2 - 1)\n o['skydir'] = skydir\n\n if tsmap.geom.coordsys == 'GAL':\n gal_cov = utils.ellipse_to_cov(o['pos_err_semimajor'],\n o['pos_err_semiminor'],\n o['theta'])\n theta_cel = wcs_utils.get_cel_to_gal_angle(skydir)\n cel_cov = utils.ellipse_to_cov(o['pos_err_semimajor'],\n o['pos_err_semiminor'],\n o['theta'] + theta_cel)\n\n else:\n cel_cov = utils.ellipse_to_cov(o['pos_err_semimajor'],\n o['pos_err_semiminor'],\n o['theta'])\n theta_gal = 2 * np.pi - wcs_utils.get_cel_to_gal_angle(skydir)\n gal_cov = utils.ellipse_to_cov(o['pos_err_semimajor'],\n o['pos_err_semiminor'],\n o['theta'] + theta_gal)\n\n o['pos_gal_cov'] = gal_cov\n o['pos_cel_cov'] = cel_cov\n o['pos_gal_corr'] = utils.cov_to_correlation(gal_cov)\n o['pos_cel_corr'] = utils.cov_to_correlation(cel_cov)\n o['glon_err'], o['glat_err'] = np.sqrt(\n gal_cov[0, 0]), np.sqrt(gal_cov[1, 1])\n o['ra_err'], o['dec_err'] = np.sqrt(cel_cov[0, 0]), np.sqrt(cel_cov[1, 1])\n\n return o", "response": "Fit an ellipse from a TS map to a position uncertainty ellipse."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef find_peaks(input_map, threshold, min_separation=0.5):\n\n data = input_map.data\n\n cdelt = max(input_map.geom.wcs.wcs.cdelt)\n min_separation = max(min_separation, 2 * cdelt)\n\n region_size_pix = int(min_separation / cdelt)\n region_size_pix = max(3, region_size_pix)\n\n deltaxy = 
utils.make_pixel_distance(region_size_pix * 2 + 3)\n deltaxy *= max(input_map.geom.wcs.wcs.cdelt)\n region = deltaxy < min_separation\n\n local_max = maximum_filter(data, footprint=region) == data\n local_max[data < threshold] = False\n\n labeled, num_objects = scipy.ndimage.label(local_max)\n slices = scipy.ndimage.find_objects(labeled)\n\n peaks = []\n for s in slices:\n skydir = SkyCoord.from_pixel(s[1].start, s[0].start,\n input_map.geom.wcs)\n peaks.append({'ix': s[1].start,\n 'iy': s[0].start,\n 'skydir': skydir,\n 'amp': data[s[0].start, s[1].start]})\n\n return sorted(peaks, key=lambda t: t['amp'], reverse=True)", "response": "Find peaks in a 2 - D map object that have amplitude larger than threshold and lie a distance at least min_separation from another 2 - D map object."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsolves for the position and uncertainty of source in one dimension assuming that the source is near the maximum and the errors are parabolic.", "response": "def estimate_pos_and_err_parabolic(tsvals):\n \"\"\"Solve for the position and uncertainty of source in one dimension\n assuming that you are near the maximum and the errors are parabolic\n\n Parameters\n ----------\n tsvals : `~numpy.ndarray`\n The TS values at the maximum TS, and for each pixel on either side\n\n Returns\n -------\n The position and uncertainty of the source, in pixel units\n w.r.t. the center of the maximum pixel\n\n \"\"\"\n a = tsvals[2] - tsvals[0]\n bc = 2. 
* tsvals[1] - tsvals[0] - tsvals[2]\n s = a / (2 * bc)\n err = np.sqrt(2 / bc)\n return s, err"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nsolve for the position and uncertainty of source assuming that the maximum and the errors are parabolic.", "response": "def refine_peak(tsmap, pix):\n \"\"\"Solve for the position and uncertainty of source assuming that you\n are near the maximum and the errors are parabolic\n\n Parameters\n ----------\n tsmap : `~numpy.ndarray`\n Array with the TS data.\n\n Returns\n -------\n The position and uncertainty of the source, in pixel units\n w.r.t. the center of the maximum pixel\n\n \"\"\"\n # Note the annoying WCS convention\n nx = tsmap.shape[1]\n ny = tsmap.shape[0]\n\n if pix[0] == 0 or pix[0] == (nx - 1):\n xval = float(pix[0])\n xerr = -1\n else:\n x_arr = tsmap[pix[1], pix[0] - 1:pix[0] + 2]\n xval, xerr = estimate_pos_and_err_parabolic(x_arr)\n xval += float(pix[0])\n\n if pix[1] == 0 or pix[1] == (ny - 1):\n yval = float(pix[1])\n yerr = -1\n else:\n y_arr = tsmap[pix[1] - 1:pix[1] + 2, pix[0]]\n yval, yerr = estimate_pos_and_err_parabolic(y_arr)\n yval += float(pix[1])\n\n return (xval, yval), (xerr, yerr)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating an empty source table.", "response": "def create_source_table(scan_shape):\n \"\"\"Create an empty source table.\n\n Returns\n -------\n tab : `~astropy.table.Table`\n \"\"\"\n\n cols_dict = collections.OrderedDict()\n cols_dict['Source_Name'] = dict(dtype='S48', format='%s')\n cols_dict['name'] = dict(dtype='S48', format='%s')\n cols_dict['class'] = dict(dtype='S32', format='%s')\n cols_dict['SpectrumType'] = dict(dtype='S32', format='%s')\n cols_dict['SpatialType'] = dict(dtype='S32', format='%s')\n cols_dict['SourceType'] = dict(dtype='S32', format='%s')\n cols_dict['SpatialModel'] = dict(dtype='S32', format='%s')\n cols_dict['RAJ2000'] = dict(dtype='f8', format='%.3f', unit='deg')\n 
cols_dict['DEJ2000'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['GLON'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['GLAT'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['ts'] = dict(dtype='f8', format='%.3f')\n cols_dict['loglike'] = dict(dtype='f8', format='%.3f')\n cols_dict['npred'] = dict(dtype='f8', format='%.3f')\n cols_dict['offset'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['offset_ra'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['offset_dec'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['offset_glon'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['offset_glat'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['offset_roi_edge'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['pivot_energy'] = dict(dtype='f8', format='%.3f', unit='MeV')\n cols_dict['flux_scan'] = dict(dtype='f8', format='%.3f',\n shape=scan_shape)\n cols_dict['eflux_scan'] = dict(dtype='f8', format='%.3f',\n shape=scan_shape)\n cols_dict['norm_scan'] = dict(dtype='f8', format='%.3f',\n shape=scan_shape)\n cols_dict['dloglike_scan'] = dict(dtype='f8', format='%.3f',\n shape=scan_shape)\n cols_dict['loglike_scan'] = dict(dtype='f8', format='%.3f',\n shape=scan_shape)\n\n # Add source dictionary columns\n for k, v in sorted(defaults.source_output.items()):\n if not k in cols_dict.keys():\n if v[2] == float:\n cols_dict[k] = dict(dtype='f8', format='%f')\n elif k == 'Spectrum_Filename' or k == 'Spatial_Filename':\n cols_dict[k] = dict(dtype='S128', format='%s')\n elif v[2] == str:\n cols_dict[k] = dict(dtype='S32', format='%s')\n\n cols_dict['param_names'] = dict(dtype='S32', format='%s', shape=(10,))\n cols_dict['param_values'] = dict(dtype='f8', format='%f', shape=(10,))\n cols_dict['param_errors'] = dict(dtype='f8', format='%f', shape=(10,))\n\n # Catalog Parameters\n cols_dict['Flux_Density'] = dict(\n dtype='f8', format='%.5g', unit='1 / (MeV cm2 s)')\n 
cols_dict['Spectral_Index'] = dict(dtype='f8', format='%.3f')\n cols_dict['Pivot_Energy'] = dict(dtype='f8', format='%.3f', unit='MeV')\n cols_dict['beta'] = dict(dtype='f8', format='%.3f')\n cols_dict['Exp_Index'] = dict(dtype='f8', format='%.3f')\n cols_dict['Cutoff'] = dict(dtype='f8', format='%.3f', unit='MeV')\n cols_dict['Expfactor'] = dict(dtype='f8', format='%.3f')\n\n cols_dict['Conf_68_PosAng'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['Conf_68_SemiMajor'] = dict(\n dtype='f8', format='%.3f', unit='deg')\n cols_dict['Conf_68_SemiMinor'] = dict(\n dtype='f8', format='%.3f', unit='deg')\n cols_dict['Conf_95_PosAng'] = dict(dtype='f8', format='%.3f', unit='deg')\n cols_dict['Conf_95_SemiMajor'] = dict(\n dtype='f8', format='%.3f', unit='deg')\n cols_dict['Conf_95_SemiMinor'] = dict(\n dtype='f8', format='%.3f', unit='deg')\n\n for t in ['eflux', 'eflux100', 'eflux1000', 'eflux10000']:\n cols_dict[t] = dict(dtype='f8', format='%.3f', unit='MeV / (cm2 s)')\n cols_dict[t + '_err'] = dict(dtype='f8',\n format='%.3f', unit='MeV / (cm2 s)')\n\n for t in ['eflux_ul95', 'eflux100_ul95', 'eflux1000_ul95', 'eflux10000_ul95']:\n cols_dict[t] = dict(dtype='f8', format='%.3f', unit='MeV / (cm2 s)')\n\n for t in ['flux', 'flux100', 'flux1000', 'flux10000']:\n cols_dict[t] = dict(dtype='f8', format='%.3f', unit='1 / (cm2 s)')\n cols_dict[t + '_err'] = dict(dtype='f8',\n format='%.3f', unit='1 / (cm2 s)')\n\n for t in ['flux_ul95', 'flux100_ul95', 'flux1000_ul95', 'flux10000_ul95']:\n cols_dict[t] = dict(dtype='f8', format='%.3f', unit='1 / (cm2 s)')\n\n for t in ['dnde', 'dnde100', 'dnde1000', 'dnde10000']:\n cols_dict[t] = dict(dtype='f8', format='%.3f', unit='1 / (MeV cm2 s)')\n cols_dict[t + '_err'] = dict(dtype='f8',\n format='%.3f', unit='1 / (MeV cm2 s)')\n\n cols = [Column(name=k, **v) for k, v in cols_dict.items()]\n tab = Table(cols)\n return tab"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following 
Python 3 function\ndef get_skydir_distance_mask(src_skydir, skydir, dist, min_dist=None,\n square=False, coordsys='CEL'):\n \"\"\"Retrieve sources within a certain angular distance of an\n (ra,dec) coordinate. This function supports two types of\n geometric selections: circular (square=False) and square\n (square=True). The circular selection finds all sources with a given\n angular distance of the target position. The square selection\n finds sources within an ROI-like region of size R x R where R\n = 2 x dist.\n\n Parameters\n ----------\n\n src_skydir : `~astropy.coordinates.SkyCoord` \n Array of sky directions.\n\n skydir : `~astropy.coordinates.SkyCoord` \n Sky direction with respect to which the selection will be applied.\n\n dist : float\n Maximum distance in degrees from the sky coordinate.\n\n square : bool\n Choose whether to apply a circular or square selection.\n\n coordsys : str\n Coordinate system to use when applying a selection with square=True.\n\n \"\"\"\n\n if dist is None:\n dist = 180.\n\n if not square:\n dtheta = src_skydir.separation(skydir).rad\n elif coordsys == 'CEL':\n dtheta = get_linear_dist(skydir,\n src_skydir.ra.rad,\n src_skydir.dec.rad,\n coordsys=coordsys)\n elif coordsys == 'GAL':\n dtheta = get_linear_dist(skydir,\n src_skydir.galactic.l.rad,\n src_skydir.galactic.b.rad,\n coordsys=coordsys)\n else:\n raise Exception('Unrecognized coordinate system: %s' % coordsys)\n\n msk = (dtheta < np.radians(dist))\n if min_dist is not None:\n msk &= (dtheta > np.radians(min_dist))\n return msk", "response": "Returns a mask of sources within a certain angular distance of a given sky direction."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef spectral_pars_from_catalog(cat):\n\n spectrum_type = cat['SpectrumType']\n pars = get_function_defaults(cat['SpectrumType'])\n par_idxs = {k: i for i, k in\n enumerate(get_function_par_names(cat['SpectrumType']))}\n\n for k in pars:\n pars[k]['value'] = 
cat['param_values'][par_idxs[k]]\n\n if spectrum_type == 'PowerLaw':\n\n pars['Index']['value'] *= -1.0\n pars['Index']['scale'] = -1.0\n pars['Scale']['scale'] = 1.0\n pars['Index']['max'] = max(5.0, pars['Index']['value'] + 1.0)\n pars['Index']['min'] = min(0.0, pars['Index']['value'] - 1.0)\n pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])\n pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)\n pars['Index'] = make_parameter_dict(pars['Index'], False, False)\n\n elif spectrum_type == 'LogParabola':\n\n pars['norm'] = make_parameter_dict(pars['norm'], False, True)\n pars['Eb'] = make_parameter_dict(pars['Eb'], True, False)\n pars['alpha'] = make_parameter_dict(pars['alpha'], False, False)\n pars['beta'] = make_parameter_dict(pars['beta'], False, False)\n\n elif spectrum_type == 'PLSuperExpCutoff':\n\n pars['Index1']['value'] *= -1.0\n pars['Index1']['scale'] = -1.0\n pars['Index2']['scale'] = 1.0\n pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])\n pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)\n pars['Index1'] = make_parameter_dict(pars['Index1'], False, False)\n pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)\n pars['Cutoff'] = make_parameter_dict(pars['Cutoff'], False, True)\n\n elif spectrum_type == 'PLSuperExpCutoff2':\n\n pars['Index1']['value'] *= -1.0\n pars['Index1']['scale'] = -1.0\n pars['Index2']['scale'] = 1.0\n pars['Prefactor'] = make_parameter_dict(pars['Prefactor'])\n pars['Scale'] = make_parameter_dict(pars['Scale'], True, False)\n pars['Index1'] = make_parameter_dict(pars['Index1'], False, False)\n pars['Index2'] = make_parameter_dict(pars['Index2'], False, False)\n pars['Expfactor'] = make_parameter_dict(pars['Expfactor'], False, True)\n else:\n raise Exception('Unsupported spectral type:' + spectrum_type)\n\n return pars", "response": "Create spectral parameters from 3FGL catalog columns."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code 
does\ndef is_free(self):\n return bool(np.array([int(value.get(\"free\", False)) for key, value in self.spectral_pars.items()]).sum())", "response": "returns True if any of the spectral model parameters is set to free else False\n "} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef set_position(self, skydir):\n\n if not isinstance(skydir, SkyCoord):\n skydir = SkyCoord(ra=skydir[0], dec=skydir[1], unit=u.deg)\n\n if not skydir.isscalar:\n skydir = np.ravel(skydir)[0]\n\n radec = np.array([skydir.icrs.ra.deg, skydir.icrs.dec.deg])\n self._set_radec(radec)", "response": "Sets the position of the source object."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns a SkyCoord representation of the source position.", "response": "def skydir(self):\n \"\"\"Return a SkyCoord representation of the source position.\n\n Returns\n -------\n skydir : `~astropy.coordinates.SkyCoord` \n \"\"\"\n return SkyCoord(self.radec[0] * u.deg, self.radec[1] * u.deg)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef create_from_dict(cls, src_dict, roi_skydir=None, rescale=False):\n src_dict = copy.deepcopy(src_dict)\n src_dict.setdefault('SpatialModel', 'PointSource')\n src_dict.setdefault('Spectrum_Filename', None)\n src_dict.setdefault('SpectrumType', 'PowerLaw')\n src_dict['SpatialType'] = get_spatial_type(src_dict['SpatialModel'])\n\n spectrum_type = src_dict['SpectrumType']\n spatial_type = src_dict['SpatialType']\n\n spectral_pars = src_dict.pop('spectral_pars', {})\n spatial_pars = src_dict.pop('spatial_pars', {})\n\n if not spectral_pars:\n spectral_pars = extract_pars_from_dict(spectrum_type, src_dict)\n norm_par_name = get_function_norm_par_name(spectrum_type)\n if norm_par_name is not None:\n spectral_pars[norm_par_name].setdefault('free', True)\n\n if not spatial_pars:\n spatial_pars = extract_pars_from_dict(spatial_type, 
src_dict)\n for k in ['RA', 'DEC', 'Prefactor']:\n if k in spatial_pars:\n del spatial_pars[k]\n\n spectral_pars = create_pars_from_dict(spectrum_type, spectral_pars,\n rescale)\n spatial_pars = create_pars_from_dict(spatial_type, spatial_pars,\n False)\n\n if 'file' in src_dict:\n src_dict['Spectrum_Filename'] = src_dict.pop('file')\n\n if spectrum_type == 'DMFitFunction' and src_dict['Spectrum_Filename'] is None:\n src_dict['Spectrum_Filename'] = os.path.join('$FERMIPY_DATA_DIR',\n 'gammamc_dif.dat')\n\n src_dict['spectral_pars'] = cast_pars_dict(spectral_pars)\n src_dict['spatial_pars'] = cast_pars_dict(spatial_pars)\n\n if 'name' in src_dict:\n name = src_dict['name']\n src_dict['Source_Name'] = src_dict.pop('name')\n elif 'Source_Name' in src_dict:\n name = src_dict['Source_Name']\n else:\n raise Exception('Source name undefined.')\n\n skydir = wcs_utils.get_target_skydir(src_dict, roi_skydir)\n\n src_dict['RAJ2000'] = skydir.ra.deg\n src_dict['DEJ2000'] = skydir.dec.deg\n\n radec = np.array([skydir.ra.deg, skydir.dec.deg])\n\n return cls(name, src_dict, radec=radec)", "response": "Create a source object from a python dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create_from_xmlfile(cls, xmlfile, extdir=None):\n root = ElementTree.ElementTree(file=xmlfile).getroot()\n srcs = root.findall('source')\n if len(srcs) == 0:\n raise Exception('No sources found.')\n return cls.create_from_xml(srcs[0], extdir=extdir)", "response": "Create a Source object from an XML file."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate a Source object from an XML node.", "response": "def create_from_xml(root, extdir=None):\n \"\"\"Create a Source object from an XML node.\n\n Parameters\n ----------\n root : `~xml.etree.ElementTree.Element`\n XML node containing the source.\n\n extdir : str\n Path to the extended source archive.\n \"\"\"\n\n src_type = 
root.attrib['type']\n spec = utils.load_xml_elements(root, 'spectrum')\n spectral_pars = utils.load_xml_elements(root, 'spectrum/parameter')\n spectral_type = spec['type']\n spectral_pars = cast_pars_dict(spectral_pars)\n spat = {}\n spatial_pars = {}\n nested_sources = []\n\n if src_type == 'CompositeSource':\n spatial_type = 'CompositeSource'\n source_library = root.findall('source_library')[0]\n for node in source_library.findall('source'):\n nested_sources += [Source.create_from_xml(node, extdir=extdir)]\n else:\n spat = utils.load_xml_elements(root, 'spatialModel')\n spatial_pars = utils.load_xml_elements(\n root, 'spatialModel/parameter')\n spatial_pars = cast_pars_dict(spatial_pars)\n spatial_type = spat['type']\n\n xml_dict = copy.deepcopy(root.attrib)\n src_dict = {'catalog': xml_dict}\n\n src_dict['Source_Name'] = xml_dict['name']\n src_dict['SpectrumType'] = spectral_type\n src_dict['SpatialType'] = spatial_type\n src_dict['SourceType'] = src_type\n src_dict['Spatial_Filename'] = None\n src_dict['Spectrum_Filename'] = None\n if 'file' in spat:\n src_dict['Spatial_Filename'] = utils.xmlpath_to_path(spat['file'])\n if not os.path.isfile(src_dict['Spatial_Filename']) \\\n and extdir is not None:\n src_dict['Spatial_Filename'] = \\\n os.path.join(extdir, 'Templates',\n src_dict['Spatial_Filename'])\n\n if 'file' in spec:\n src_dict['Spectrum_Filename'] = utils.xmlpath_to_path(spec['file'])\n\n if src_type == 'PointSource':\n src_dict['SpatialModel'] = 'PointSource'\n elif src_type == 'CompositeSource':\n src_dict['SpatialModel'] = 'CompositeSource'\n elif spatial_type == 'SpatialMap':\n src_dict['SpatialModel'] = 'SpatialMap'\n else:\n src_dict['SpatialModel'] = spatial_type\n\n if src_type == 'PointSource' or \\\n spatial_type in ['SpatialMap', 'RadialGaussian', 'RadialDisk']:\n\n if 'RA' in xml_dict:\n src_dict['RAJ2000'] = float(xml_dict['RA'])\n src_dict['DEJ2000'] = float(xml_dict['DEC'])\n elif 'RA' in spatial_pars:\n src_dict['RAJ2000'] = 
float(spatial_pars['RA']['value'])\n src_dict['DEJ2000'] = float(spatial_pars['DEC']['value'])\n else:\n try:\n skydir = wcs_utils.get_map_skydir(os.path.expandvars(\n src_dict['Spatial_Filename']))\n except Exception:\n skydir = hpx_utils.get_map_skydir(os.path.expandvars(\n src_dict['Spatial_Filename']))\n src_dict['RAJ2000'] = skydir.ra.deg\n src_dict['DEJ2000'] = skydir.dec.deg\n\n radec = np.array([src_dict['RAJ2000'], src_dict['DEJ2000']])\n\n src_dict['spectral_pars'] = spectral_pars\n src_dict['spatial_pars'] = spatial_pars\n return Source(src_dict['Source_Name'],\n src_dict, radec=radec)\n\n elif src_type == 'DiffuseSource' and spatial_type == 'ConstantValue':\n return IsoSource(src_dict['Source_Name'],\n {'Spectrum_Filename': spec['file'],\n 'spectral_pars': spectral_pars,\n 'spatial_pars': spatial_pars})\n elif src_type == 'DiffuseSource' and spatial_type == 'MapCubeFunction':\n return MapCubeSource(src_dict['Source_Name'],\n {'Spatial_Filename': spat['file'],\n 'SpectrumType': spectral_type,\n 'spectral_pars': spectral_pars,\n 'spatial_pars': spatial_pars})\n elif src_type == 'CompositeSource':\n return CompositeSource(src_dict['Source_Name'],\n {'SpectrumType': spectral_type,\n 'nested_sources': nested_sources})\n else:\n raise Exception(\n 'Unrecognized type for source: %s %s' % (src_dict['Source_Name'], src_type))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef write_xml(self, root):\n\n if not self.extended:\n try:\n source_element = utils.create_xml_element(root, 'source',\n dict(name=self['Source_Name'],\n type='PointSource'))\n except TypeError as msg:\n print (self['Source_Name'], self)\n raise TypeError(msg)\n\n spat_el = ElementTree.SubElement(source_element, 'spatialModel')\n spat_el.set('type', 'SkyDirFunction')\n\n elif self['SpatialType'] == 'SpatialMap':\n source_element = utils.create_xml_element(root, 'source',\n dict(name=self['Source_Name'],\n 
type='DiffuseSource'))\n\n filename = utils.path_to_xmlpath(self['Spatial_Filename'])\n spat_el = utils.create_xml_element(source_element, 'spatialModel',\n dict(map_based_integral='True',\n type='SpatialMap',\n file=filename))\n else:\n source_element = utils.create_xml_element(root, 'source',\n dict(name=self['Source_Name'],\n type='DiffuseSource'))\n spat_el = utils.create_xml_element(source_element, 'spatialModel',\n dict(type=self['SpatialType']))\n\n for k, v in self.spatial_pars.items():\n utils.create_xml_element(spat_el, 'parameter', v)\n\n el = ElementTree.SubElement(source_element, 'spectrum')\n\n stype = self['SpectrumType'].strip()\n el.set('type', stype)\n\n if self['Spectrum_Filename'] is not None:\n filename = utils.path_to_xmlpath(self['Spectrum_Filename'])\n el.set('file', filename)\n\n for k, v in self.spectral_pars.items():\n utils.create_xml_element(el, 'parameter', v)", "response": "Write this source to an XML node."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nclears the contents of the ROI.", "response": "def clear(self):\n \"\"\"Clear the contents of the ROI.\"\"\"\n self._srcs = []\n self._diffuse_srcs = []\n self._src_dict = collections.defaultdict(list)\n self._src_radius = []"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _create_diffuse_src_from_xml(self, config, src_type='FileFunction'):\n diffuse_xmls = config.get('diffuse_xml')\n srcs_out = []\n for diffuse_xml in diffuse_xmls:\n srcs_out += self.load_xml(diffuse_xml, coordsys=config.get('coordsys', 'CEL'))\n return srcs_out", "response": "Load sources from an XML file."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nadd a new source from a dictionary or an existing source object.", "response": "def create_source(self, name, src_dict, build_index=True,\n merge_sources=True, rescale=True):\n \"\"\"Add a new source to the ROI model from a dictionary or 
an\n existing source object.\n\n Parameters\n ----------\n\n name : str\n\n src_dict : dict or `~fermipy.roi_model.Source`\n\n Returns\n -------\n\n src : `~fermipy.roi_model.Source`\n \"\"\"\n\n src_dict = copy.deepcopy(src_dict)\n\n if isinstance(src_dict, dict):\n src_dict['name'] = name\n src = Model.create_from_dict(src_dict, self.skydir,\n rescale=rescale)\n else:\n src = src_dict\n src.set_name(name)\n\n if isinstance(src, Source):\n src.set_roi_direction(self.skydir)\n src.set_roi_geom(self.geom)\n\n self.load_source(src, build_index=build_index,\n merge_sources=merge_sources)\n\n return self.get_source_by_name(name)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef load_sources(self, sources):\n\n self.clear()\n for s in sources:\n\n if isinstance(s, dict):\n s = Model.create_from_dict(s)\n\n self.load_source(s, build_index=False)\n self._build_src_index()", "response": "Delete all sources in the ROI and load the input source list."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef load_source(self, src, build_index=True, merge_sources=True,\n **kwargs):\n \"\"\"\n Load a single source.\n\n Parameters\n ----------\n\n src : `~fermipy.roi_model.Source`\n Source object that will be added to the ROI.\n\n merge_sources : bool \n When a source matches an existing source in the model\n update that source with the properties of the new source.\n\n build_index : bool \n Re-make the source index after loading this source.\n\n \"\"\"\n src = copy.deepcopy(src)\n name = src.name.replace(' ', '').lower()\n\n min_sep = kwargs.get('min_separation', None)\n\n if min_sep is not None:\n\n sep = src.skydir.separation(self._src_skydir).deg\n if len(sep) > 0 and np.min(sep) < min_sep:\n return\n\n match_srcs = self.match_source(src)\n\n if len(match_srcs) == 1:\n\n # self.logger.debug('Found matching source for %s : %s',\n # src.name, 
match_srcs[0].name)\n\n if merge_sources:\n match_srcs[0].update_from_source(src)\n else:\n match_srcs[0].add_name(src.name)\n\n self._add_source_alias(src.name.replace(' ', '').lower(),\n match_srcs[0])\n return\n elif len(match_srcs) > 2:\n raise Exception('Multiple sources with name %s' % name)\n\n self._add_source_alias(src.name, src)\n\n for name in src.names:\n self._add_source_alias(name.replace(' ', '').lower(), src)\n\n if isinstance(src, Source):\n self._srcs.append(src)\n else:\n self._diffuse_srcs.append(src)\n\n if build_index:\n self._build_src_index()", "response": "Load a single source into the ROI."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nlook for sources in the model that match the given source.", "response": "def match_source(self, src):\n \"\"\"Look for source or sources in the model that match the\n given source. Sources are matched by name and any association\n columns defined in the assoc_xmatch_columns parameter.\n \"\"\"\n\n srcs = []\n\n names = [src.name]\n for col in self.config['assoc_xmatch_columns']:\n if col in src.assoc and src.assoc[col]:\n names += [src.assoc[col]]\n\n for name in names:\n name = name.replace(' ', '').lower()\n if name not in self._src_dict:\n continue\n srcs += [s for s in self._src_dict[name] if s not in srcs]\n\n return srcs"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef load(self, **kwargs):\n\n coordsys = kwargs.get('coordsys', 'CEL')\n extdir = kwargs.get('extdir', self.extdir)\n srcname = kwargs.get('srcname', None)\n\n self.clear()\n self.load_diffuse_srcs()\n\n for c in self.config['catalogs']:\n\n if isinstance(c, catalog.Catalog):\n self.load_existing_catalog(c)\n continue\n\n extname = os.path.splitext(c)[1]\n if extname != '.xml':\n self.load_fits_catalog(c, extdir=extdir, coordsys=coordsys,\n srcname=srcname)\n elif extname == '.xml':\n self.load_xml(c, extdir=extdir, coordsys=coordsys)\n else:\n 
raise Exception('Unrecognized catalog file extension: %s' % c)\n\n for c in self.config['sources']:\n\n if 'name' not in c:\n raise Exception(\n 'No name field in source dictionary:\\n ' + str(c))\n\n self.create_source(c['name'], c, build_index=False)\n\n self._build_src_index()", "response": "Load both point source and diffuse components."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates an ROI model from a data file.", "response": "def create_from_roi_data(cls, datafile):\n \"\"\"Create an ROI model.\"\"\"\n data = np.load(datafile).flat[0]\n\n roi = cls()\n roi.load_sources(data['sources'].values())\n\n return roi"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate an ROIModel instance from a selection.", "response": "def create(cls, selection, config, **kwargs):\n \"\"\"Create an ROIModel instance.\"\"\"\n\n if selection['target'] is not None:\n return cls.create_from_source(selection['target'],\n config, **kwargs)\n else:\n target_skydir = wcs_utils.get_target_skydir(selection)\n return cls.create_from_position(target_skydir, config, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates an ROIModel instance centered on a sky direction.", "response": "def create_from_position(cls, skydir, config, **kwargs):\n \"\"\"Create an ROIModel instance centered on a sky direction.\n\n Parameters\n ----------\n\n skydir : `~astropy.coordinates.SkyCoord` \n Sky direction on which the ROI will be centered.\n\n config : dict\n Model configuration dictionary.\n \"\"\"\n\n coordsys = kwargs.pop('coordsys', 'CEL')\n roi = cls(config, skydir=skydir, coordsys=coordsys, **kwargs)\n return roi"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef create_from_source(cls, name, config, **kwargs):\n\n coordsys = kwargs.pop('coordsys', 'CEL')\n\n roi = cls(config, src_radius=None, 
src_roiwidth=None,\n srcname=name, **kwargs)\n src = roi.get_source_by_name(name)\n\n return cls.create_from_position(src.skydir, config,\n coordsys=coordsys, **kwargs)", "response": "Create an ROI centered on the given source."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_source_by_name(self, name):\n srcs = self.get_sources_by_name(name)\n\n if len(srcs) == 1:\n return srcs[0]\n elif len(srcs) == 0:\n raise Exception('No source matching name: ' + name)\n elif len(srcs) > 1:\n raise Exception('Multiple sources matching name: ' + name)", "response": "Returns a single source in the ROI with the given name."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn a list of sources in the ROI matching the given name.", "response": "def get_sources_by_name(self, name):\n \"\"\"Return a list of sources in the ROI matching the given\n name. The input name string can match any of the strings in\n the names property of the source object. Case and whitespace\n are ignored when matching name strings.\n\n Parameters\n ----------\n name : str \n\n Returns\n -------\n srcs : list\n A list of `~fermipy.roi_model.Model` objects. 
\n \"\"\"\n\n index_name = name.replace(' ', '').lower()\n\n if index_name in self._src_dict:\n return list(self._src_dict[index_name])\n else:\n raise Exception('No source matching name: ' + name)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a list of sources that are in the diffuse table that are at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least at least one source in the diffuse table.", "response": "def get_sources(self, skydir=None, distance=None, cuts=None,\n minmax_ts=None, minmax_npred=None,\n exclude=None, square=False, coordsys='CEL',\n names=None):\n \"\"\"Retrieve list of source objects satisfying the following\n selections:\n\n * Angular separation from ``skydir`` or ROI center (if\n ``skydir`` is None) less than ``distance``. \n\n * Cuts on source properties defined in ``cuts`` list.\n\n * TS and Npred in range specified by ``minmax_ts`` and ``minmax_npred``.\n\n * Name matching a value in ``names``\n\n Sources can be excluded from the selection by adding their\n name to the ``exclude`` list.\n\n Returns\n -------\n srcs : list\n List of source objects.\n \"\"\"\n\n if skydir is None:\n skydir = self.skydir\n\n if exclude is None:\n exclude = []\n\n rsrc, srcs = self.get_sources_by_position(skydir,\n distance,\n square=square,\n coordsys=coordsys)\n\n o = []\n for s in srcs + self.diffuse_sources:\n\n if names and s.name not in names:\n continue\n if s.name in exclude:\n continue\n if not s.check_cuts(cuts):\n continue\n ts = s['ts']\n npred = s['npred']\n\n if not utils.apply_minmax_selection(ts, minmax_ts):\n continue\n if not utils.apply_minmax_selection(npred, minmax_npred):\n continue\n o.append(s)\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nretrieve sources within a certain angular distance of a sky coordinate. 
This function supports two types of geometric selections: circular (square=False) and square (square=True). The circular selection finds all sources with a given angular distance of the target position. The square selection finds sources within an ROI-like region of size R x R where R = 2 x dist. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Sky direction with respect to which the selection will be applied. dist : float Maximum distance in degrees from the sky coordinate. square : bool Choose whether to apply a circular or square selection. coordsys : str Coordinate system to use when applying a selection with square=True.", "response": "def get_sources_by_position(self, skydir, dist, min_dist=None,\n square=False, coordsys='CEL'):\n \"\"\"Retrieve sources within a certain angular distance of a sky\n coordinate. This function supports two types of geometric\n selections: circular (square=False) and square (square=True).\n The circular selection finds all sources with a given angular\n distance of the target position. 
The square selection finds\n sources within an ROI-like region of size R x R where R = 2 x\n dist.\n\n Parameters\n ----------\n\n skydir : `~astropy.coordinates.SkyCoord` \n Sky direction with respect to which the selection will be applied.\n\n dist : float\n Maximum distance in degrees from the sky coordinate.\n\n square : bool\n Choose whether to apply a circular or square selection.\n\n coordsys : str\n Coordinate system to use when applying a selection with square=True.\n\n \"\"\"\n\n msk = get_skydir_distance_mask(self._src_skydir, skydir, dist,\n min_dist=min_dist, square=square,\n coordsys=coordsys)\n\n radius = self._src_skydir.separation(skydir).deg\n radius = radius[msk]\n\n srcs = [self._srcs[i] for i in np.nonzero(msk)[0]]\n\n isort = np.argsort(radius)\n radius = radius[isort]\n srcs = [srcs[i] for i in isort]\n\n return radius, srcs"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nloads sources from a FITS catalog file.", "response": "def load_fits_catalog(self, name, **kwargs):\n \"\"\"Load sources from a FITS catalog file.\n\n Parameters\n ----------\n\n name : str\n Catalog name or path to a catalog FITS file.\n \"\"\"\n # EAC split this function to make it easier to load an existing catalog\n cat = catalog.Catalog.create(name)\n self.load_existing_catalog(cat, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nload sources from an existing catalog object.", "response": "def load_existing_catalog(self, cat, **kwargs):\n \"\"\"Load sources from an existing catalog object.\n\n Parameters\n ----------\n cat : `~fermipy.catalog.Catalog`\n Catalog object.\n\n \"\"\"\n coordsys = kwargs.get('coordsys', 'CEL')\n extdir = kwargs.get('extdir', self.extdir)\n srcname = kwargs.get('srcname', None)\n\n m0 = get_skydir_distance_mask(cat.skydir, self.skydir,\n self.config['src_radius'])\n m1 = get_skydir_distance_mask(cat.skydir, self.skydir,\n 
self.config['src_radius_roi'],\n square=True, coordsys=coordsys)\n m = (m0 & m1)\n if srcname is not None:\n m &= utils.find_rows_by_string(cat.table, [srcname],\n self.src_name_cols)\n\n offset = self.skydir.separation(cat.skydir).deg\n offset_cel = wcs_utils.sky_to_offset(self.skydir,\n cat.radec[:, 0], cat.radec[:, 1],\n 'CEL')\n offset_gal = wcs_utils.sky_to_offset(self.skydir,\n cat.glonlat[\n :, 0], cat.glonlat[:, 1],\n 'GAL')\n\n for i, (row, radec) in enumerate(zip(cat.table[m],\n cat.radec[m])):\n catalog_dict = catalog.row_to_dict(row)\n src_dict = {'catalog': catalog_dict}\n src_dict['Source_Name'] = row['Source_Name']\n src_dict['SpectrumType'] = row['SpectrumType']\n\n if row['extended']:\n src_dict['SourceType'] = 'DiffuseSource'\n src_dict['SpatialType'] = str(row['Spatial_Function'])\n src_dict['SpatialModel'] = str(row['Spatial_Function'])\n\n search_dirs = []\n if extdir is not None:\n search_dirs += [extdir, os.path.join(extdir, 'Templates')]\n\n search_dirs += [row['extdir'],\n os.path.join(row['extdir'], 'Templates')]\n\n if src_dict['SpatialType'] == 'SpatialMap':\n src_dict['Spatial_Filename'] = utils.resolve_file_path(\n row['Spatial_Filename'],\n search_dirs=search_dirs)\n\n else:\n src_dict['SourceType'] = 'PointSource'\n src_dict['SpatialType'] = 'SkyDirFunction'\n src_dict['SpatialModel'] = 'PointSource'\n\n src_dict['spectral_pars'] = spectral_pars_from_catalog(\n catalog_dict)\n src_dict['spatial_pars'] = spatial_pars_from_catalog(catalog_dict)\n\n src = Source(src_dict['Source_Name'], src_dict, radec=radec)\n src.data['offset'] = offset[m][i]\n src.data['offset_ra'] = offset_cel[:, 0][m][i]\n src.data['offset_dec'] = offset_cel[:, 1][m][i]\n src.data['offset_glon'] = offset_gal[:, 0][m][i]\n src.data['offset_glat'] = offset_gal[:, 1][m][i]\n self.load_source(src, False,\n merge_sources=self.config['merge_sources'])\n\n self._build_src_index()"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where 
it\nloads sources from an XML file.", "response": "def load_xml(self, xmlfile, **kwargs):\n \"\"\"Load sources from an XML file.\"\"\"\n\n extdir = kwargs.get('extdir', self.extdir)\n coordsys = kwargs.get('coordsys', 'CEL')\n if not os.path.isfile(xmlfile):\n xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)\n\n root = ElementTree.ElementTree(file=xmlfile).getroot()\n\n diffuse_srcs = []\n srcs = []\n ra, dec = [], []\n\n for s in root.findall('source'):\n src = Source.create_from_xml(s, extdir=extdir)\n if src.diffuse:\n diffuse_srcs += [src]\n else:\n srcs += [src]\n ra += [src['RAJ2000']]\n dec += [src['DEJ2000']]\n\n src_skydir = SkyCoord(ra=np.array(ra) * u.deg,\n dec=np.array(dec) * u.deg)\n radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T\n glonlat = np.vstack((src_skydir.galactic.l.deg,\n src_skydir.galactic.b.deg)).T\n\n offset = self.skydir.separation(src_skydir).deg\n offset_cel = wcs_utils.sky_to_offset(self.skydir,\n radec[:, 0], radec[:, 1], 'CEL')\n offset_gal = wcs_utils.sky_to_offset(self.skydir,\n glonlat[:, 0], glonlat[:, 1], 'GAL')\n\n m0 = get_skydir_distance_mask(src_skydir, self.skydir,\n self.config['src_radius'])\n m1 = get_skydir_distance_mask(src_skydir, self.skydir,\n self.config['src_radius_roi'],\n square=True, coordsys=coordsys)\n m = (m0 & m1)\n srcs = np.array(srcs)[m]\n for i, s in enumerate(srcs):\n s.data['offset'] = offset[m][i]\n s.data['offset_ra'] = offset_cel[:, 0][m][i]\n s.data['offset_dec'] = offset_cel[:, 1][m][i]\n s.data['offset_glon'] = offset_gal[:, 0][m][i]\n s.data['offset_glat'] = offset_gal[:, 1][m][i]\n self.load_source(s, False,\n merge_sources=self.config['merge_sources'])\n\n for i, s in enumerate(diffuse_srcs):\n self.load_source(s, False,\n merge_sources=self.config['merge_sources'])\n\n self._build_src_index()\n return srcs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nbuilding an indices for fast lookup of a source given its name or coordinates.", 
"response": "def _build_src_index(self):\n \"\"\"Build an indices for fast lookup of a source given its name\n or coordinates.\"\"\"\n\n self._srcs = sorted(self._srcs, key=lambda t: t['offset'])\n nsrc = len(self._srcs)\n radec = np.zeros((2, nsrc))\n\n for i, src in enumerate(self._srcs):\n radec[:, i] = src.radec\n\n self._src_skydir = SkyCoord(ra=radec[0], dec=radec[1], unit=u.deg)\n self._src_radius = self._src_skydir.separation(self.skydir)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef write_xml(self, xmlfile, config=None):\n\n root = ElementTree.Element('source_library')\n root.set('title', 'source_library')\n\n for s in self._srcs:\n s.write_xml(root)\n\n if config is not None:\n srcs = self.create_diffuse_srcs(config)\n diffuse_srcs = {s.name: s for s in srcs}\n for s in self._diffuse_srcs:\n src = copy.deepcopy(diffuse_srcs.get(s.name, s))\n src.update_spectral_pars(s.spectral_pars)\n src.write_xml(root)\n else:\n for s in self._diffuse_srcs:\n s.write_xml(root)\n\n output_file = open(xmlfile, 'w')\n output_file.write(utils.prettify_xml(root))", "response": "Save the ROI model as an XML file."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating an astropy. table. 
Table object with the contents of the ROI model.", "response": "def create_table(self, names=None):\n \"\"\"Create an astropy Table object with the contents of the ROI model.\n \"\"\"\n\n scan_shape = (1,)\n for src in self._srcs:\n scan_shape = max(scan_shape, src['dloglike_scan'].shape)\n\n tab = create_source_table(scan_shape)\n for s in self._srcs:\n if names is not None and s.name not in names:\n continue\n s.add_to_table(tab)\n\n return tab"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef write_fits(self, fitsfile):\n\n tab = self.create_table()\n hdu_data = fits.table_to_hdu(tab)\n hdus = [fits.PrimaryHDU(), hdu_data]\n fits_utils.write_hdus(hdus, fitsfile)", "response": "Write the ROI model to a FITS file."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef to_ds9(self, free='box',fixed='cross',frame='fk5',color='green',header=True):\n # todo: add support for extended sources?!\n allowed_symbols = ['circle','box','diamond','cross','x','arrow','boxcircle']\n # adding some checks.\n assert free in allowed_symbols, \"symbol %s not supported\"%free\n assert fixed in allowed_symbols, \"symbol %s not supported\"%fixed\n lines = []\n if header:\n lines.append(\"global color=%s\"%color)\n for src in self.get_sources():\n # self.get_sources will return both Source, but also IsoSource and MapCube, in which case the sources\n # should be ignored (since they are by construction all-sky and have no corresponding ds9 region string)\n if not isinstance(src, Source): continue\n # otherwise get ra, dec\n ra, dec = src.radec\n line = \"%s; point( %1.5f, %1.5f) # point=%s text={%s} color=%s\"%(frame,ra, dec,\n free if src.is_free else fixed,\n src.name,\n color)\n lines.append(line)\n return lines", "response": "Returns a list of ds9 region definitions for this source."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncreate a ds9 
compatible region file from the ROI.", "response": "def write_ds9region(self, region, *args, **kwargs):\n\n \"\"\"Create a ds9 compatible region file from the ROI.\n\n It calls the `to_ds9` method and write the result to the region file. Only the file name is required.\n All other parameters will be forwarded to the `to_ds9` method, see the documentation of that method\n for all accepted parameters and options.\n Parameters\n ----------\n region : str\n name of the region file (string)\n \"\"\"\n lines = self.to_ds9(*args,**kwargs)\n with open(region,'w') as fo:\n fo.write(\"\\n\".join(lines))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef main():\n\n gtselect_keys = ['tmin', 'tmax', 'emin', 'emax', 'zmax', 'evtype', 'evclass',\n 'phasemin', 'phasemax', 'convtype', 'rad', 'ra', 'dec']\n\n gtmktime_keys = ['roicut', 'filter']\n\n usage = \"usage: %(prog)s [options] \"\n description = \"Run gtselect and gtmktime on one or more FT1 files. 
\"\n \"Note that gtmktime will be skipped if no FT2 file is provided.\"\n parser = argparse.ArgumentParser(usage=usage, description=description)\n\n add_lsf_args(parser)\n\n for k in gtselect_keys:\n\n if k in ['evtype', 'evclass', 'convtype']:\n parser.add_argument('--%s' % k, default=None, type=int, help='')\n else:\n parser.add_argument('--%s' % k, default=None, type=float, help='')\n\n for k in gtmktime_keys:\n parser.add_argument('--%s' % k, default=None, type=str, help='')\n\n parser.add_argument('--rock_angle', default=None, type=float, help='')\n\n parser.add_argument('--outdir', default=None, type=str,\n help='Path to output directory used when merge=False.')\n parser.add_argument('--output', default=None, type=str,\n help='Path to output file used when merge=True.')\n parser.add_argument('--scfile', default=None, type=str, help='')\n\n parser.add_argument('--dry_run', default=False, action='store_true')\n parser.add_argument('--overwrite', default=False, action='store_true')\n parser.add_argument('--merge', default=False, action='store_true',\n help='Merge input FT1 files into N files where N is determined '\n 'by files_per_split.')\n\n parser.add_argument('--files_per_split', default=100,\n type=int, help='Set the number of files to combine in each '\n 'split of the input file list.')\n\n parser.add_argument('--file_idx_min', default=None,\n type=int, help='Set the number of files to assign to '\n 'each batch job.')\n\n parser.add_argument('--file_idx_max', default=None,\n type=int, help='Set the number of files to assign to '\n 'each batch job.')\n\n parser.add_argument('files', nargs='+', default=None,\n help='List of files.')\n\n args = parser.parse_args()\n\n batch = vars(args).pop('batch')\n files = vars(args).pop('files')\n args.outdir = os.path.abspath(args.outdir)\n files = [os.path.abspath(f) for f in files]\n\n ft1_files = get_files(files, ['.fit', '.fits'])\n for i, f in enumerate(ft1_files):\n if re.search('^root\\:\\/\\/', f) is None:\n 
ft1_files[i] = os.path.abspath(f)\n\n input_files = []\n output_files = []\n files_idx_min = []\n files_idx_max = []\n opts = []\n\n if args.file_idx_min is not None and args.file_idx_max is not None:\n\n files_idx_min = [args.file_idx_min]\n files_idx_max = [args.file_idx_max]\n input_files = [files]\n output_files = [args.output]\n\n elif args.merge:\n if not args.output:\n raise Exception('No output file defined.')\n\n nfiles = len(ft1_files)\n njob = int(np.ceil(nfiles / float(args.files_per_split)))\n for ijob, i in enumerate(range(0, nfiles, args.files_per_split)):\n\n if args.outdir is not None:\n mkdir(args.outdir)\n outdir = os.path.abspath(args.outdir)\n else:\n outdir = os.path.dirname(os.path.dirname(args.output))\n\n outfile = os.path.splitext(os.path.basename(args.output))[0]\n outfile += '_%03i.fits' % (ijob)\n outfile = os.path.join(outdir, outfile)\n input_files += [files]\n output_files += [outfile]\n files_idx_min += [i]\n files_idx_max += [i + args.files_per_split]\n opts += [vars(args).copy()]\n opts[-1]['output'] = outfile\n opts[-1]['file_idx_min'] = i\n opts[-1]['file_idx_max'] = i + args.files_per_split\n\n else:\n input_files = ft1_files\n files_idx_min = [i for i in range(len(ft1_files))]\n files_idx_max = [i + 1 for i in range(len(ft1_files))]\n output_files = [os.path.join(\n args.outdir, os.path.basename(x)) for x in ft1_files]\n opts = [vars(args).copy() for x in ft1_files]\n\n if batch:\n submit_jobs('fermipy-select',\n input_files, opts, output_files, overwrite=args.overwrite,\n dry_run=args.dry_run)\n sys.exit(0)\n\n logger = Logger.configure(os.path.basename(__file__), None, logging.INFO)\n logger.info('Starting.')\n\n if args.scfile is not None:\n args.scfile = os.path.abspath(args.scfile)\n\n cwd = os.getcwd()\n user = os.environ['USER']\n tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')\n os.chdir(tmpdir)\n\n logger.info('tmpdir %s', tmpdir)\n logger.info('outdir %s', args.outdir)\n logger.info('output %s', 
args.output)\n\n for infiles, outfile, idx_min, idx_max in zip(input_files, output_files,\n files_idx_min, files_idx_max):\n\n logger.info('infiles %s', pprint.pformat(infiles))\n logger.info('outfile %s', outfile)\n infiles = get_files(infiles, ['.fit', '.fits'])\n if idx_min is not None:\n infiles = infiles[idx_min:idx_max]\n\n for i, f in enumerate(infiles):\n\n if re.search('^root\\:\\/\\/', f) is None:\n continue\n os.system('xrdcp %s %s' % (f, f.split('/')[-1]))\n infiles[i] = os.path.join(tmpdir, f.split('/')[-1])\n\n kw = {k: args.__dict__[k] for k in gtselect_keys}\n if kw['emax'] is None:\n kw['emax'] = 1E6\n\n create_filelist(infiles, 'list.txt')\n kw['infile'] = 'list.txt'\n kw['outfile'] = 'out.fits'\n staged_outfile = kw['outfile']\n run_gtapp('gtselect', logger, kw)\n\n kw = {k: args.__dict__[k] for k in gtmktime_keys}\n if kw['roicut'] is None:\n kw['roicut'] = 'no'\n\n if kw['filter'] is None:\n kw['filter'] = 'DATA_QUAL==1 && LAT_CONFIG==1'\n if args.rock_angle is not None:\n kw['filter'] += ' && ABS(ROCK_ANGLE)<%(rock)s ' % dict(\n rock=args.rock_angle)\n kw['evfile'] = 'out.fits'\n kw['outfile'] = 'out_filtered.fits'\n if args.scfile is not None:\n kw['scfile'] = args.scfile\n staged_outfile = kw['outfile']\n run_gtapp('gtmktime', logger, kw)\n\n logger.info('cp %s %s', staged_outfile, outfile)\n shutil.copy(staged_outfile, outfile)\n\n os.chdir(cwd)\n logger.info('Deleting %s', tmpdir)\n shutil.rmtree(tmpdir)\n logger.info('Done.')", "response": "Entry point for the main function of the command line interface."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nselecting only rows representing extended sources from a catalog table", "response": "def select_extended(cat_table):\n \"\"\"Select only rows representing extended sources from a catalog table\n \"\"\"\n try:\n l = [len(row.strip()) > 0 for row in cat_table['Extended_Source_Name'].data]\n return np.array(l, bool)\n except KeyError:\n return 
cat_table['Extended']"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates a bit mask selecting the rows that pass a selection", "response": "def make_mask(cat_table, cut):\n \"\"\"Mask a bit mask selecting the rows that pass a selection\n \"\"\"\n cut_var = cut['cut_var']\n min_val = cut.get('min_val', None)\n max_val = cut.get('max_val', None)\n\n nsrc = len(cat_table)\n if min_val is None:\n min_mask = np.ones((nsrc), bool)\n else:\n min_mask = cat_table[cut_var] >= min_val\n if max_val is None:\n max_mask = np.ones((nsrc), bool)\n else:\n max_mask = cat_table[cut_var] <= max_val\n full_mask = min_mask * max_mask\n return full_mask"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nselecting only rows passing a set of cuts from catalog table", "response": "def select_sources(cat_table, cuts):\n \"\"\"Select only rows passing a set of cuts from catalog table\n \"\"\"\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nbuilds and return the information about the catalog components", "response": "def make_catalog_comp_dict(**kwargs):\n \"\"\"Build and return the information about the catalog components\n \"\"\"\n library_yamlfile = kwargs.pop('library', 'models/library.yaml')\n csm = kwargs.pop('CatalogSourceManager', CatalogSourceManager(**kwargs))\n if library_yamlfile is None or library_yamlfile == 'None':\n yamldict = {}\n else:\n yamldict = yaml.safe_load(open(library_yamlfile))\n catalog_info_dict, comp_info_dict = csm.make_catalog_comp_info_dict(yamldict)\n return 
dict(catalog_info_dict=catalog_info_dict,\n comp_info_dict=comp_info_dict,\n CatalogSourceManager=csm)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nread the yaml file for a particular split key", "response": "def read_catalog_info_yaml(self, splitkey):\n \"\"\" Read the yaml file for a particular split key\n \"\"\"\n catalog_info_yaml = self._name_factory.catalog_split_yaml(sourcekey=splitkey,\n fullpath=True)\n yaml_dict = yaml.safe_load(open(catalog_info_yaml))\n # resolve env vars\n yaml_dict['catalog_file'] = os.path.expandvars(yaml_dict['catalog_file'])\n yaml_dict['catalog_extdir'] = os.path.expandvars(yaml_dict['catalog_extdir'])\n return yaml_dict"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nbuild a CatalogInfo object from a dictionary.", "response": "def build_catalog_info(self, catalog_info):\n \"\"\" Build a CatalogInfo object \"\"\"\n cat = SourceFactory.build_catalog(**catalog_info)\n catalog_info['catalog'] = cat\n # catalog_info['catalog_table'] =\n # Table.read(catalog_info['catalog_file'])\n catalog_info['catalog_table'] = cat.table\n catalog_info['roi_model'] =\\\n SourceFactory.make_fermipy_roi_model_from_catalogs([cat])\n catalog_info['srcmdl_name'] =\\\n self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name'])\n return CatalogInfo(**catalog_info)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef catalog_components(self, catalog_name, split_ver):\n return sorted(self._split_comp_info_dicts[\"%s_%s\" % (catalog_name, split_ver)].keys())", "response": "Return the set of merged components for a particular split key"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef split_comp_info(self, catalog_name, split_ver, split_key):\n return self._split_comp_info_dicts[\"%s_%s\" % (catalog_name, split_ver)][split_key]", "response": "Return the info for a particular split key"} 
{"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nmaking the information about a single merged component.", "response": "def make_catalog_comp_info(self, full_cat_info, split_key, rule_key, rule_val, sources):\n \"\"\" Make the information about a single merged component\n\n Parameters\n ----------\n\n full_cat_info : `_model_component.CatalogInfo`\n Information about the full catalog\n split_key : str\n Key identifying the version of the spliting used\n rule_key : str\n Key identifying the specific rule for this component\n rule_val : list\n List of the cuts used to define this component\n sources : list\n List of the names of the sources in this component\n\n Returns `CompositeSourceInfo` or `CatalogSourcesInfo`\n \"\"\"\n merge = rule_val.get('merge', True)\n sourcekey = \"%s_%s_%s\" % (\n full_cat_info.catalog_name, split_key, rule_key)\n srcmdl_name = self._name_factory.srcmdl_xml(sourcekey=sourcekey)\n srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name)\n kwargs = dict(source_name=\"%s_%s\" % (full_cat_info.catalog_name, rule_key),\n source_ver=split_key,\n sourcekey=sourcekey,\n srcmdl_name=srcmdl_name,\n source_names=sources,\n catalog_info=full_cat_info,\n roi_model=SourceFactory.copy_selected_sources(full_cat_info.roi_model,\n sources))\n if merge:\n return CompositeSourceInfo(**kwargs)\n return CatalogSourcesInfo(**kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nmake the information about the catalog components.", "response": "def make_catalog_comp_info_dict(self, catalog_sources):\n \"\"\" Make the information about the catalog components\n\n Parameters\n ----------\n\n catalog_sources : dict\n Dictionary with catalog source defintions\n\n Returns\n -------\n\n catalog_ret_dict : dict\n Dictionary mapping catalog_name to `model_component.CatalogInfo`\n split_ret_dict : dict\n Dictionary mapping sourcekey to `model_component.ModelComponentInfo`\n \"\"\"\n catalog_ret_dict = 
{}\n split_ret_dict = {}\n for key, value in catalog_sources.items():\n if value is None:\n continue\n if value['model_type'] != 'catalog':\n continue\n versions = value['versions']\n for version in versions:\n ver_key = \"%s_%s\" % (key, version)\n source_dict = self.read_catalog_info_yaml(ver_key)\n try:\n full_cat_info = catalog_ret_dict[key]\n except KeyError:\n full_cat_info = self.build_catalog_info(source_dict)\n catalog_ret_dict[key] = full_cat_info\n\n try:\n all_sources = [x.strip() for x in full_cat_info.catalog_table[\n 'Source_Name'].astype(str).tolist()]\n except KeyError:\n print(full_cat_info.catalog_table.colnames)\n used_sources = []\n rules_dict = source_dict['rules_dict']\n split_dict = {}\n for rule_key, rule_val in rules_dict.items():\n # full_key =\\\n # self._name_factory.merged_sourcekey(catalog=ver_key,\n # rulekey=rule_key)\n sources = select_sources(\n full_cat_info.catalog_table, rule_val['cuts'])\n used_sources.extend(sources)\n split_dict[rule_key] = self.make_catalog_comp_info(\n full_cat_info, version, rule_key, rule_val, sources)\n\n # Now deal with the remainder\n for source in used_sources:\n try:\n all_sources.remove(source)\n except ValueError:\n continue\n rule_val = dict(cuts=[],\n merge=source_dict['remainder'].get('merge', False))\n split_dict['remain'] = self.make_catalog_comp_info(\n full_cat_info, version, 'remain', rule_val, all_sources)\n\n # Merge in the info for this version of splits\n split_ret_dict[ver_key] = split_dict\n\n self._catalog_comp_info_dicts.update(catalog_ret_dict)\n self._split_comp_info_dicts.update(split_ret_dict)\n return (catalog_ret_dict, split_ret_dict)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nextracting data from table HDUs in TSCube file and convert them to FITS images", "response": "def extract_images_from_tscube(infile, outfile):\n \"\"\" Extract data from table HDUs in TSCube file and convert them to FITS images\n \"\"\"\n inhdulist = 
fits.open(infile)\n wcs = pywcs.WCS(inhdulist[0].header)\n map_shape = inhdulist[0].data.shape\n\n t_eng = Table.read(infile, \"EBOUNDS\")\n t_scan = Table.read(infile, \"SCANDATA\")\n t_fit = Table.read(infile, \"FITDATA\")\n\n n_ebin = len(t_eng)\n energies = np.ndarray((n_ebin + 1))\n energies[0:-1] = t_eng[\"E_MIN\"]\n energies[-1] = t_eng[\"E_MAX\"][-1]\n\n cube_shape = (n_ebin, map_shape[1], map_shape[0])\n\n wcs_cube = wcs_utils.wcs_add_energy_axis(wcs, energies)\n\n outhdulist = [inhdulist[0], inhdulist[\"EBOUNDS\"]]\n\n FIT_COLNAMES = ['FIT_TS', 'FIT_STATUS', 'FIT_NORM',\n 'FIT_NORM_ERR', 'FIT_NORM_ERRP', 'FIT_NORM_ERRN']\n SCAN_COLNAMES = ['TS', 'BIN_STATUS', 'NORM', 'NORM_UL',\n 'NORM_ERR', 'NORM_ERRP', 'NORM_ERRN', 'LOGLIKE']\n\n for c in FIT_COLNAMES:\n data = t_fit[c].data.reshape(map_shape)\n hdu = fits.ImageHDU(data, wcs.to_header(), name=c)\n outhdulist.append(hdu)\n\n for c in SCAN_COLNAMES:\n data = t_scan[c].data.swapaxes(0, 1).reshape(cube_shape)\n hdu = fits.ImageHDU(data, wcs_cube.to_header(), name=c)\n outhdulist.append(hdu)\n\n hdulist = fits.HDUList(outhdulist)\n hdulist.writeto(outfile, clobber=True)\n return hdulist"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef convert_tscube_old(infile, outfile):\n inhdulist = fits.open(infile)\n\n # If already in the new-style format just write and exit\n if 'DLOGLIKE_SCAN' in inhdulist['SCANDATA'].columns.names:\n if infile != outfile:\n inhdulist.writeto(outfile, clobber=True)\n return\n\n # Get stuff out of the input file\n nrows = inhdulist['SCANDATA']._nrows\n nebins = inhdulist['EBOUNDS']._nrows\n npts = inhdulist['SCANDATA'].data.field('NORMSCAN').shape[1] / nebins\n\n emin = inhdulist['EBOUNDS'].data.field('e_min') / 1E3\n emax = inhdulist['EBOUNDS'].data.field('e_max') / 1E3\n eref = np.sqrt(emin * emax)\n dnde_emin = inhdulist['EBOUNDS'].data.field('E_MIN_FL')\n dnde_emax = inhdulist['EBOUNDS'].data.field('E_MAX_FL')\n 
index = np.log(dnde_emin / dnde_emax) / np.log(emin / emax)\n\n flux = PowerLaw.eval_flux(emin, emax, [dnde_emin, index], emin)\n eflux = PowerLaw.eval_eflux(emin, emax, [dnde_emin, index], emin)\n dnde = PowerLaw.eval_dnde(np.sqrt(emin * emax), [dnde_emin, index], emin)\n\n ts_map = inhdulist['PRIMARY'].data.reshape((nrows))\n ok_map = inhdulist['TSMAP_OK'].data.reshape((nrows))\n n_map = inhdulist['N_MAP'].data.reshape((nrows))\n errp_map = inhdulist['ERRP_MAP'].data.reshape((nrows))\n errn_map = inhdulist['ERRN_MAP'].data.reshape((nrows))\n err_map = np.ndarray((nrows))\n m = errn_map > 0\n err_map[m] = 0.5 * (errp_map[m] + errn_map[m])\n err_map[~m] = errp_map[~m]\n ul_map = n_map + 2.0 * errp_map\n\n ncube = np.rollaxis(inhdulist['N_CUBE'].data,\n 0, 3).reshape((nrows, nebins))\n errpcube = np.rollaxis(\n inhdulist['ERRPCUBE'].data, 0, 3).reshape((nrows, nebins))\n errncube = np.rollaxis(\n inhdulist['ERRNCUBE'].data, 0, 3).reshape((nrows, nebins))\n tscube = np.rollaxis(inhdulist['TSCUBE'].data,\n 0, 3).reshape((nrows, nebins))\n nll_cube = np.rollaxis(\n inhdulist['NLL_CUBE'].data, 0, 3).reshape((nrows, nebins))\n ok_cube = np.rollaxis(\n inhdulist['TSCUBE_OK'].data, 0, 3).reshape((nrows, nebins))\n\n ul_cube = ncube + 2.0 * errpcube\n m = errncube > 0\n errcube = np.ndarray((nrows, nebins))\n errcube[m] = 0.5 * (errpcube[m] + errncube[m])\n errcube[~m] = errpcube[~m]\n\n norm_scan = inhdulist['SCANDATA'].data.field(\n 'NORMSCAN').reshape((nrows, npts, nebins)).swapaxes(1, 2)\n nll_scan = inhdulist['SCANDATA'].data.field(\n 'NLL_SCAN').reshape((nrows, npts, nebins)).swapaxes(1, 2)\n\n # Adjust the \"EBOUNDS\" hdu\n columns = inhdulist['EBOUNDS'].columns\n columns.add_col(fits.Column(name=str('e_ref'),\n format='E', array=eref * 1E3,\n unit='keV'))\n columns.add_col(fits.Column(name=str('ref_flux'),\n format='D', array=flux,\n unit='ph / (cm2 s)'))\n columns.add_col(fits.Column(name=str('ref_eflux'),\n format='D', array=eflux,\n unit='MeV / (cm2 s)'))\n 
columns.add_col(fits.Column(name=str('ref_dnde'),\n format='D', array=dnde,\n unit='ph / (MeV cm2 s)'))\n\n columns.change_name('E_MIN_FL', str('ref_dnde_e_min'))\n columns.change_unit('ref_dnde_e_min', 'ph / (MeV cm2 s)')\n columns.change_name('E_MAX_FL', str('ref_dnde_e_max'))\n columns.change_unit('ref_dnde_e_max', 'ph / (MeV cm2 s)')\n columns.change_name('NPRED', str('ref_npred'))\n\n hdu_e = fits.BinTableHDU.from_columns(columns, name='EBOUNDS')\n\n # Make the \"FITDATA\" hdu\n columns = fits.ColDefs([])\n\n columns.add_col(fits.Column(\n name=str('fit_ts'), format='E', array=ts_map))\n columns.add_col(fits.Column(\n name=str('fit_status'), format='E', array=ok_map))\n columns.add_col(fits.Column(\n name=str('fit_norm'), format='E', array=n_map))\n columns.add_col(fits.Column(\n name=str('fit_norm_err'), format='E', array=err_map))\n columns.add_col(fits.Column(\n name=str('fit_norm_errp'), format='E', array=errp_map))\n columns.add_col(fits.Column(\n name=str('fit_norm_errn'), format='E', array=errn_map))\n hdu_f = fits.BinTableHDU.from_columns(columns, name='FITDATA')\n\n # Make the \"SCANDATA\" hdu\n columns = fits.ColDefs([])\n\n columns.add_col(fits.Column(name=str('ts'),\n format='%iE' % nebins, array=tscube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('bin_status'),\n format='%iE' % nebins, array=ok_cube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('norm'),\n format='%iE' % nebins, array=ncube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('norm_ul'),\n format='%iE' % nebins, array=ul_cube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('norm_err'),\n format='%iE' % nebins, array=errcube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('norm_errp'),\n format='%iE' % nebins, array=errpcube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('norm_errn'),\n format='%iE' % nebins, array=errncube,\n dim=str('(%i)' % 
nebins)))\n\n columns.add_col(fits.Column(name=str('loglike'),\n format='%iE' % nebins, array=nll_cube,\n dim=str('(%i)' % nebins)))\n\n columns.add_col(fits.Column(name=str('norm_scan'),\n format='%iE' % (nebins * npts),\n array=norm_scan,\n dim=str('(%i,%i)' % (npts, nebins))))\n\n columns.add_col(fits.Column(name=str('dloglike_scan'),\n format='%iE' % (nebins * npts),\n array=nll_scan,\n dim=str('(%i,%i)' % (npts, nebins))))\n\n hdu_s = fits.BinTableHDU.from_columns(columns, name='SCANDATA')\n\n hdulist = fits.HDUList([inhdulist[0],\n hdu_s,\n hdu_f,\n inhdulist[\"BASELINE\"],\n hdu_e])\n\n hdulist['SCANDATA'].header['UL_CONF'] = 0.95\n\n hdulist.writeto(outfile, clobber=True)\n\n return hdulist", "response": "Convert between old and new TSCube formats."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef truncate_array(array1, array2, position):\n\n slices = []\n for i in range(array1.ndim):\n xmin = 0\n xmax = array1.shape[i]\n dxlo = array1.shape[i] // 2\n dxhi = array1.shape[i] - dxlo\n if position[i] - dxlo < 0:\n xmin = max(dxlo - position[i], 0)\n\n if position[i] + dxhi > array2.shape[i]:\n xmax = array1.shape[i] - (position[i] + dxhi - array2.shape[i])\n xmax = max(xmax, 0)\n slices += [slice(xmin, xmax)]\n\n return array1[slices]", "response": "Truncate array1 by finding the overlap with array2 when the array1 center is located at the given position in array2."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nwrapping to perform row-wise aggregation of list arguments and pass them to a function. The return value of the function is summed over the argument groups. Non-list arguments will be automatically cast to a list.", "response": "def _sum_wrapper(fn):\n \"\"\"\n Wrapper to perform row-wise aggregation of list arguments and pass\n them to a function. The return value of the function is summed\n over the argument groups. 
Non-list arguments will be\n automatically cast to a list.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n v = 0\n new_args = _cast_args_to_list(args)\n for arg in zip(*new_args):\n v += fn(*arg, **kwargs)\n return v\n\n return wrapper"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncomputes the amplitudes bounds for the root of _f_cash_root_cython.", "response": "def _amplitude_bounds(counts, bkg, model):\n \"\"\"\n Compute bounds for the root of `_f_cash_root_cython`.\n\n Parameters\n ----------\n counts : `~numpy.ndarray`\n Count map.\n bkg : `~numpy.ndarray`\n Background map.\n model : `~numpy.ndarray`\n Source template (multiplied with exposure).\n \"\"\"\n\n if isinstance(counts, list):\n counts = np.concatenate([t.flat for t in counts])\n bkg = np.concatenate([t.flat for t in bkg])\n model = np.concatenate([t.flat for t in model])\n\n s_model = np.sum(model)\n s_counts = np.sum(counts)\n\n sn = bkg / model\n imin = np.argmin(sn)\n sn_min = sn[imin]\n c_min = counts[imin]\n\n b_min = c_min / s_model - sn_min\n b_max = s_counts / s_model - sn_min\n return max(b_min, 0), b_max"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nfunction to find the root of a cash archive.", "response": "def _f_cash_root(x, counts, bkg, model):\n \"\"\"\n Function to find root of. 
Described in Appendix A, Stewart (2009).\n\n Parameters\n ----------\n x : float\n Model amplitude.\n counts : `~numpy.ndarray`\n Count map slice, where model is defined.\n bkg : `~numpy.ndarray`\n Background map slice, where model is defined.\n model : `~numpy.ndarray`\n Source template (multiplied with exposure).\n \"\"\"\n return np.sum(model * (counts / (x * model + bkg) - 1.0))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _root_amplitude_brentq(counts, bkg, model, root_fn=_f_cash_root):\n\n # Compute amplitude bounds and assert counts > 0\n amplitude_min, amplitude_max = _amplitude_bounds(counts, bkg, model)\n\n if not np.sum(counts) > 0:\n return amplitude_min, 0\n\n args = (counts, bkg, model)\n\n if root_fn(0.0, *args) < 0:\n return 0.0, 1\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n try:\n result = brentq(root_fn, amplitude_min, amplitude_max, args=args,\n maxiter=MAX_NITER, full_output=True, rtol=1E-4)\n return result[0], result[1].iterations\n except (RuntimeError, ValueError):\n # Where the root finding fails NaN is set as amplitude\n return np.nan, MAX_NITER", "response": "Fit amplitude by finding roots using Brent algorithm."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef poisson_log_like(counts, model):\n loglike = np.array(model)\n m = counts > 0\n loglike[m] -= counts[m] * np.log(model[m])\n return loglike", "response": "Compute the Poisson log - likelihood function for the given counts and model arrays."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef f_cash(x, counts, bkg, model):\n\n return 2.0 * poisson_log_like(counts, bkg + x * model)", "response": "Returns the Poisson log - likelihood of the cash statistics."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 
that\ncomputes the TS value at a given pixel position using the approach described in Stewart 2009.", "response": "def _ts_value(position, counts, bkg, model, C_0_map):\n \"\"\"\n Compute TS value at a given pixel position using the approach described\n in Stewart (2009).\n\n Parameters\n ----------\n position : tuple\n Pixel position.\n counts : `~numpy.ndarray`\n Count map.\n bkg : `~numpy.ndarray`\n Background map.\n model : `~numpy.ndarray`\n Source model map.\n\n Returns\n -------\n TS : float\n TS value at the given pixel position.\n \"\"\"\n extract_fn = _collect_wrapper(extract_large_array)\n truncate_fn = _collect_wrapper(extract_small_array)\n\n # Get data slices\n counts_slice = extract_fn(counts, model, position)\n bkg_slice = extract_fn(bkg, model, position)\n C_0_slice = extract_fn(C_0_map, model, position)\n model_slice = truncate_fn(model, counts, position)\n\n # Flattened Arrays\n counts_ = np.concatenate([t.flat for t in counts_slice])\n bkg_ = np.concatenate([t.flat for t in bkg_slice])\n model_ = np.concatenate([t.flat for t in model_slice])\n C_0_ = np.concatenate([t.flat for t in C_0_slice])\n C_0 = np.sum(C_0_)\n\n root_fn = _sum_wrapper(_f_cash_root)\n amplitude, niter = _root_amplitude_brentq(counts_, bkg_, model_,\n root_fn=_f_cash_root)\n\n if niter > MAX_NITER:\n print('Exceeded maximum number of function evaluations!')\n return np.nan, amplitude, niter\n\n with np.errstate(invalid='ignore', divide='ignore'):\n C_1 = f_cash_sum(amplitude, counts_, bkg_, model_)\n\n # Compute and return TS value\n return (C_0 - C_1) * np.sign(amplitude), amplitude, niter"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncomputes TS value at a given pixel position using the newton method.", "response": "def _ts_value_newton(position, counts, bkg, model, C_0_map):\n \"\"\"\n Compute TS value at a given pixel position using the newton\n method.\n\n Parameters\n ----------\n position : tuple\n Pixel 
position.\n\n counts : `~numpy.ndarray`\n Count map.\n\n bkg : `~numpy.ndarray`\n Background map.\n\n model : `~numpy.ndarray`\n Source model map.\n\n Returns\n -------\n TS : float\n TS value at the given pixel position.\n\n amp : float\n Best-fit amplitude of the test source.\n\n niter : int\n Number of fit iterations.\n \"\"\"\n extract_fn = _collect_wrapper(extract_large_array)\n truncate_fn = _collect_wrapper(extract_small_array)\n\n # Get data slices\n counts_slice = extract_fn(counts, model, position)\n bkg_slice = extract_fn(bkg, model, position)\n C_0_map_slice = extract_fn(C_0_map, model, position)\n model_slice = truncate_fn(model, counts, position)\n\n # Mask of pixels with > 0 counts\n mask = [c > 0 for c in counts_slice]\n\n # Sum of background and model in empty pixels\n bkg_sum = np.sum(np.array([np.sum(t[~m])\n for t, m in zip(bkg_slice, mask)]))\n model_sum = np.sum(np.array([np.sum(t[~m])\n for t, m in zip(model_slice, mask)]))\n\n # Flattened Arrays\n counts_ = np.concatenate([t[m].flat for t, m in zip(counts_slice, mask)])\n bkg_ = np.concatenate([t[m].flat for t, m in zip(bkg_slice, mask)])\n model_ = np.concatenate([t[m].flat for t, m in zip(model_slice, mask)])\n C_0 = np.sum(np.array([np.sum(t) for t in C_0_map_slice]))\n\n amplitude, niter = _fit_amplitude_newton(counts_, bkg_, model_,\n model_sum)\n\n if niter > MAX_NITER:\n print('Exceeded maximum number of function evaluations!')\n return np.nan, amplitude, niter\n\n with np.errstate(invalid='ignore', divide='ignore'):\n C_1 = f_cash_sum(amplitude, counts_, bkg_, model_, bkg_sum, model_sum)\n\n # Compute and return TS value\n return (C_0 - C_1) * np.sign(amplitude), amplitude, niter"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngenerating a spatial TS map for a source component with the specified properties defined by the model argument.", "response": "def tsmap(self, prefix='', **kwargs):\n \"\"\"Generate a spatial TS map for a source component with\n 
properties defined by the `model` argument. The TS map will\n have the same geometry as the ROI. The output of this method\n is a dictionary containing `~fermipy.skymap.Map` objects with\n the TS and amplitude of the best-fit test source. By default\n this method will also save maps to FITS files and render them\n as image files.\n\n This method uses a simplified likelihood fitting\n implementation that only fits for the normalization of the\n test source. Before running this method it is recommended to\n first optimize the ROI model (e.g. by running\n :py:meth:`~fermipy.gtanalysis.GTAnalysis.optimize`).\n\n Parameters\n ----------\n prefix : str\n Optional string that will be prepended to all output files.\n\n {options}\n\n Returns\n -------\n tsmap : dict\n A dictionary containing the `~fermipy.skymap.Map` objects\n for TS and source amplitude.\n\n \"\"\"\n timer = Timer.create(start=True)\n\n schema = ConfigSchema(self.defaults['tsmap'])\n schema.add_option('loglevel', logging.INFO)\n schema.add_option('map_skydir', None, '', astropy.coordinates.SkyCoord)\n schema.add_option('map_size', 1.0)\n schema.add_option('threshold', 1E-2, '', float)\n schema.add_option('use_pylike', True, '', bool)\n schema.add_option('outfile', None, '', str)\n config = schema.create_config(self.config['tsmap'], **kwargs)\n\n # Defining default properties of test source model\n config['model'].setdefault('Index', 2.0)\n config['model'].setdefault('SpectrumType', 'PowerLaw')\n config['model'].setdefault('SpatialModel', 'PointSource')\n\n self.logger.log(config['loglevel'], 'Generating TS map')\n\n o = self._make_tsmap_fast(prefix, **config)\n\n if config['make_plots']:\n plotter = plotting.AnalysisPlotter(self.config['plotting'],\n fileio=self.config['fileio'],\n logging=self.config['logging'])\n\n plotter.make_tsmap_plots(o, self.roi)\n\n self.logger.log(config['loglevel'], 'Finished TS map')\n\n outfile = config.get('outfile', None)\n if outfile is None:\n outfile = 
utils.format_filename(self.workdir, 'tsmap',\n prefix=[o['name']])\n else:\n outfile = os.path.join(self.workdir,\n os.path.splitext(outfile)[0])\n\n if config['write_fits']:\n o['file'] = os.path.basename(outfile) + '.fits'\n self._make_tsmap_fits(o, outfile + '.fits')\n\n if config['write_npy']:\n np.save(outfile + '.npy', o)\n\n self.logger.log(config['loglevel'],\n 'Execution time: %.2f s', timer.elapsed_time)\n return o"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nmakes a TS map from a GTAnalysis instance. This is a simplified implementation optimized for speed that only fits for the source normalization (all background components are kept fixed). The spectral/spatial characteristics of the test source can be defined with the src_dict argument. By default this method will generate a TS map for a point source with an index=2.0 power-law spectrum. Parameters ---------- model : dict or `~fermipy.roi_model.Source` Dictionary or Source object defining the properties of the test source that will be used in the scan.", "response": "def _make_tsmap_fast(self, prefix, **kwargs):\n \"\"\"\n Make a TS map from a GTAnalysis instance. This is a\n simplified implementation optimized for speed that only fits\n for the source normalization (all background components are\n kept fixed). The spectral/spatial characteristics of the test\n source can be defined with the src_dict argument. 
By default\n this method will generate a TS map for a point source with an\n index=2.0 power-law spectrum.\n\n Parameters\n ----------\n model : dict or `~fermipy.roi_model.Source`\n Dictionary or Source object defining the properties of the\n test source that will be used in the scan.\n\n \"\"\"\n loglevel = kwargs.get('loglevel', self.loglevel)\n\n src_dict = copy.deepcopy(kwargs.setdefault('model', {}))\n src_dict = {} if src_dict is None else src_dict\n\n multithread = kwargs.setdefault('multithread', False)\n threshold = kwargs.setdefault('threshold', 1E-2)\n max_kernel_radius = kwargs.get('max_kernel_radius')\n loge_bounds = kwargs.setdefault('loge_bounds', None)\n use_pylike = kwargs.setdefault('use_pylike', True)\n\n if loge_bounds:\n if len(loge_bounds) != 2:\n raise Exception('Wrong size of loge_bounds array.')\n loge_bounds[0] = (loge_bounds[0] if loge_bounds[0] is not None\n else self.log_energies[0])\n loge_bounds[1] = (loge_bounds[1] if loge_bounds[1] is not None\n else self.log_energies[-1])\n else:\n loge_bounds = [self.log_energies[0], self.log_energies[-1]]\n\n # Put the test source at the pixel closest to the ROI center\n xpix, ypix = (np.round((self.npix - 1.0) / 2.),\n np.round((self.npix - 1.0) / 2.))\n cpix = np.array([xpix, ypix])\n\n map_geom = self._geom.to_image()\n frame = coordsys_to_frame(map_geom.coordsys)\n skydir = SkyCoord(*map_geom.pix_to_coord((cpix[0], cpix[1])),\n frame=frame, unit='deg')\n skydir = skydir.transform_to('icrs')\n\n src_dict['ra'] = skydir.ra.deg\n src_dict['dec'] = skydir.dec.deg\n src_dict.setdefault('SpatialModel', 'PointSource')\n src_dict.setdefault('SpatialWidth', 0.3)\n src_dict.setdefault('Index', 2.0)\n src_dict.setdefault('Prefactor', 1E-13)\n\n counts = []\n bkg = []\n model = []\n c0_map = []\n eslices = []\n enumbins = []\n model_npred = 0\n for c in self.components:\n\n imin = utils.val_to_edge(c.log_energies, loge_bounds[0])[0]\n imax = utils.val_to_edge(c.log_energies, loge_bounds[1])[0]\n\n 
eslice = slice(imin, imax)\n bm = c.model_counts_map(exclude=kwargs['exclude']).data.astype('float')[\n eslice, ...]\n cm = c.counts_map().data.astype('float')[eslice, ...]\n\n bkg += [bm]\n counts += [cm]\n c0_map += [cash(cm, bm)]\n eslices += [eslice]\n enumbins += [cm.shape[0]]\n\n self.add_source('tsmap_testsource', src_dict, free=True,\n init_source=False, use_single_psf=True,\n use_pylike=use_pylike,\n loglevel=logging.DEBUG)\n src = self.roi['tsmap_testsource']\n # self.logger.info(str(src_dict))\n modelname = utils.create_model_name(src)\n for c, eslice in zip(self.components, eslices):\n mm = c.model_counts_map('tsmap_testsource').data.astype('float')[\n eslice, ...]\n model_npred += np.sum(mm)\n model += [mm]\n\n self.delete_source('tsmap_testsource', loglevel=logging.DEBUG)\n\n for i, mm in enumerate(model):\n\n dpix = 3\n for j in range(mm.shape[0]):\n\n ix, iy = np.unravel_index(\n np.argmax(mm[j, ...]), mm[j, ...].shape)\n\n mx = mm[j, ix, :] > mm[j, ix, iy] * threshold\n my = mm[j, :, iy] > mm[j, ix, iy] * threshold\n dpix = max(dpix, np.round(np.sum(mx) / 2.))\n dpix = max(dpix, np.round(np.sum(my) / 2.))\n\n if max_kernel_radius is not None and \\\n dpix > int(max_kernel_radius / self.components[i].binsz):\n dpix = int(max_kernel_radius / self.components[i].binsz)\n\n xslice = slice(max(int(xpix - dpix), 0),\n min(int(xpix + dpix + 1), self.npix))\n model[i] = model[i][:, xslice, xslice]\n\n ts_values = np.zeros((self.npix, self.npix))\n amp_values = np.zeros((self.npix, self.npix))\n\n wrap = functools.partial(_ts_value_newton, counts=counts,\n bkg=bkg, model=model,\n C_0_map=c0_map)\n\n if kwargs['map_skydir'] is not None:\n\n map_offset = wcs_utils.skydir_to_pix(kwargs['map_skydir'],\n map_geom.wcs)\n\n map_delta = 0.5 * kwargs['map_size'] / self.components[0].binsz\n xmin = max(int(np.ceil(map_offset[1] - map_delta)), 0)\n xmax = min(int(np.floor(map_offset[1] + map_delta)) + 1, self.npix)\n ymin = max(int(np.ceil(map_offset[0] - map_delta)), 
0)\n ymax = min(int(np.floor(map_offset[0] + map_delta)) + 1, self.npix)\n\n xslice = slice(xmin, xmax)\n yslice = slice(ymin, ymax)\n xyrange = [range(xmin, xmax), range(ymin, ymax)]\n\n wcs = map_geom.wcs.deepcopy()\n npix = (ymax - ymin, xmax - xmin)\n crpix = (map_geom._crpix[0] - ymin, map_geom._crpix[1] - xmin)\n wcs.wcs.crpix[0] -= ymin\n wcs.wcs.crpix[1] -= xmin\n\n # FIXME: We should implement this with a proper cutout method\n map_geom = WcsGeom(wcs, npix, crpix=crpix)\n else:\n xyrange = [range(self.npix), range(self.npix)]\n xslice = slice(0, self.npix)\n yslice = slice(0, self.npix)\n\n positions = []\n for i, j in itertools.product(xyrange[0], xyrange[1]):\n p = [[k // 2, i, j] for k in enumbins]\n positions += [p]\n\n self.logger.log(loglevel, 'Fitting test source.')\n if multithread:\n pool = Pool()\n results = pool.map(wrap, positions)\n pool.close()\n pool.join()\n else:\n results = map(wrap, positions)\n\n for i, r in enumerate(results):\n ix = positions[i][0][1]\n iy = positions[i][0][2]\n ts_values[ix, iy] = r[0]\n amp_values[ix, iy] = r[1]\n\n ts_values = ts_values[xslice, yslice]\n amp_values = amp_values[xslice, yslice]\n\n ts_map = WcsNDMap(map_geom, ts_values)\n sqrt_ts_map = WcsNDMap(map_geom, ts_values**0.5)\n npred_map = WcsNDMap(map_geom, amp_values * model_npred)\n amp_map = WcsNDMap(map_geom, amp_values * src.get_norm())\n\n o = {'name': utils.join_strings([prefix, modelname]),\n 'src_dict': copy.deepcopy(src_dict),\n 'file': None,\n 'ts': ts_map,\n 'sqrt_ts': sqrt_ts_map,\n 'npred': npred_map,\n 'amplitude': amp_map,\n 'loglike': -self.like(),\n 'config': kwargs\n }\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef tscube(self, prefix='', **kwargs):\n\n self.logger.info('Generating TS cube')\n\n schema = ConfigSchema(self.defaults['tscube'])\n schema.add_option('make_plots', True)\n schema.add_option('write_fits', True)\n schema.add_option('write_npy', True)\n config = 
schema.create_config(self.config['tscube'], **kwargs)\n\n maps = self._make_ts_cube(prefix, **config)\n\n if config['make_plots']:\n plotter = plotting.AnalysisPlotter(self.config['plotting'],\n fileio=self.config['fileio'],\n logging=self.config['logging'])\n\n plotter.make_tsmap_plots(maps, self.roi, suffix='tscube')\n\n self.logger.info(\"Finished TS cube\")\n return maps", "response": "This method generates a spatial TS map for a source component with the specified properties defined by the model argument."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0, spatial_model='PointSource',\n spatial_size=1E-3):\n \"\"\"Calculate the observed signal and background counts given models\n for the exposure, background intensity, PSF, and source flux.\n\n Parameters\n ----------\n ebins : `~numpy.ndarray`\n Array of energy bin edges.\n\n exp : `~numpy.ndarray`\n Model for exposure.\n\n psf : `~fermipy.irfs.PSFModel`\n Model for average PSF.\n\n bkg : `~numpy.ndarray`\n Array of background intensities.\n\n fn : `~fermipy.spectrum.SpectralFunction`\n\n egy_dim : int\n Index of energy dimension in ``bkg`` and ``exp`` arrays.\n\n \"\"\"\n ewidth = utils.edge_to_width(ebins)\n ectr = np.exp(utils.edge_to_center(np.log(ebins)))\n\n r68 = psf.containment_angle(ectr, fraction=0.68)\n if spatial_model != 'PointSource':\n r68[r68 < spatial_size] = spatial_size\n\n # * np.ones((len(ectr), 31))\n theta_edges = np.linspace(0.0, 3.0, 31)[np.newaxis, :]\n theta_edges = theta_edges * r68[:, np.newaxis]\n theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])\n domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)\n\n if spatial_model == 'PointSource':\n sig_pdf = domega * psf.interp(ectr[:, np.newaxis], theta)\n elif spatial_model == 'RadialGaussian':\n sig_pdf = domega * utils.convolve2d_gauss(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),\n 
theta, spatial_size / 1.5095921854516636, nstep=2000)\n elif spatial_model == 'RadialDisk':\n sig_pdf = domega * utils.convolve2d_disk(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),\n theta, spatial_size / 0.8246211251235321)\n else:\n raise ValueError('Invalid spatial model: {}'.format(spatial_model))\n\n sig_pdf *= (np.pi / 180.)**2\n sig_flux = fn.flux(ebins[:-1], ebins[1:])\n\n # Background and signal counts\n bkgc = bkg[..., np.newaxis] * domega * exp[..., np.newaxis] * \\\n ewidth[..., np.newaxis] * (np.pi / 180.)**2\n sigc = sig_pdf * sig_flux[..., np.newaxis] * exp[..., np.newaxis]\n\n return sigc, bkgc", "response": "Compute observed signal and background counts given models of exposure background intensity PSF and source flux."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef compute_norm(sig, bkg, ts_thresh, min_counts, sum_axes=None, bkg_fit=None,\n rebin_axes=None):\n \"\"\"Solve for the normalization of the signal distribution at which the\n detection test statistic (twice delta-loglikelihood ratio) is >=\n ``ts_thresh`` AND the number of signal counts >= ``min_counts``.\n This function uses the Asimov method to calculate the median\n expected TS when the model for the background is fixed (no\n uncertainty on the background amplitude).\n\n Parameters\n ----------\n sig : `~numpy.ndarray`\n Array of signal amplitudes in counts.\n\n bkg : `~numpy.ndarray`\n Array of background amplitudes in counts.\n\n ts_thresh : float\n Test statistic threshold.\n\n min_counts : float\n Counts threshold.\n\n sum_axes : list\n Axes over which the source test statistic should be summed.\n By default the summation will be performed over all\n dimensions.\n\n bkg_fit : `~numpy.ndarray`\n Array of background amplitudes in counts for the fitting\n model. 
If None then the fit model will be equal to the data\n model.\n\n \"\"\"\n\n if sum_axes is None:\n sum_axes = np.arange(sig.ndim)\n\n sig = np.expand_dims(sig, -1)\n bkg = np.expand_dims(bkg, -1)\n sig_sum = np.apply_over_axes(np.sum, sig, sum_axes)\n bkg_sum = np.apply_over_axes(np.sum, bkg, sum_axes)\n bkg_fit_sum = None\n\n if bkg_fit is not None:\n bkg_fit = np.expand_dims(bkg_fit, -1)\n bkg_fit_sum = np.apply_over_axes(np.sum, bkg_fit, sum_axes)\n\n sig_rebin = sig\n bkg_rebin = bkg\n bkg_fit_rebin = bkg_fit\n\n if rebin_axes:\n sig_rebin = sig.copy()\n bkg_rebin = bkg.copy()\n if bkg_fit is not None:\n bkg_fit_rebin = bkg_fit.copy()\n\n for dim, rebin in zip(sum_axes, rebin_axes):\n sig_rebin = sum_bins(sig_rebin, dim, rebin)\n bkg_rebin = sum_bins(bkg_rebin, dim, rebin)\n if bkg_fit is not None:\n bkg_fit_rebin = sum_bins(bkg_fit_rebin, dim, rebin)\n\n # Find approx solution using coarse binning and summed arrays\n sig_scale = 10**np.linspace(0.0, 10.0, 51) * (min_counts / sig_sum)\n vals_approx = _solve_norm(sig_rebin, bkg_rebin, ts_thresh, min_counts,\n sig_scale, sum_axes, bkg_fit_rebin)\n\n # Refine solution using an interval (0.1,10) around approx\n # solution\n sig_scale = (10**np.linspace(0.0, 1.0, 21) *\n np.fmax(0.333 * vals_approx[..., None],\n min_counts / sig_sum))\n\n vals = _solve_norm(sig, bkg, ts_thresh, min_counts, sig_scale,\n sum_axes, bkg_fit)\n\n #sig_scale = 10**np.linspace(0.0, 10.0, 101)*(min_counts / sig_sum)\n # vals = _solve_norm(sig, bkg, ts_thresh, min_counts, sig_scale2,\n # sum_axes, bkg_fit)\n\n return vals", "response": "Solve for the normalization of the signal distribution at which the detection test statistic at which the background distribution at which the delta - loglikelihood ratio is > ts_thresh AND the number of signal counts >= min_counts."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_psf(event_class, event_type, dtheta, egy, cth):\n 
irf = create_irf(event_class, event_type)\n theta = np.degrees(np.arccos(cth))\n m = np.zeros((len(dtheta), len(egy), len(cth)))\n\n for i, x in enumerate(egy):\n for j, y in enumerate(theta):\n m[:, i, j] = irf.psf().value(dtheta, x, y, 0.0)\n\n return m", "response": "Create an array of PSF response values versus energy and inclination angle."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates an array of energy response values versus energy and incidence angle.", "response": "def create_edisp(event_class, event_type, erec, egy, cth):\n \"\"\"Create an array of energy response values versus energy and\n inclination angle.\n\n Parameters\n ----------\n egy : `~numpy.ndarray`\n Energy in MeV.\n\n cth : `~numpy.ndarray`\n Cosine of the incidence angle.\n\n \"\"\"\n irf = create_irf(event_class, event_type)\n theta = np.degrees(np.arccos(cth))\n v = np.zeros((len(erec), len(egy), len(cth)))\n m = (erec[:,None] / egy[None,:] < 3.0) & (erec[:,None] / egy[None,:] > 0.33333)\n # m |= ((erec[:,None] / egy[None,:] < 3.0) &\n # (erec[:,None] / egy[None,:] > 0.5) & (egy[None,:] < 10**2.5)) \n m = np.broadcast_to(m[:,:,None], v.shape)\n\n try: \n x = np.ones(v.shape)*erec[:,None,None]\n y = np.ones(v.shape)*egy[None,:,None]\n z = np.ones(v.shape)*theta[None,None,:]\n v[m] = irf.edisp().value(np.ravel(x[m]), np.ravel(y[m]), np.ravel(z[m]), 0.0)\n except:\n for i, x in enumerate(egy):\n for j, y in enumerate(theta):\n m = (erec / x < 3.0) & (erec / x > 0.333)\n v[m, i, j] = irf.edisp().value(erec[m], x, y, 0.0)\n \n return v"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates an array of effective areas versus energy and incidence angle.", "response": "def create_aeff(event_class, event_type, egy, cth):\n \"\"\"Create an array of effective areas versus energy and incidence\n angle. 
Binning in energy and incidence angle is controlled with\n the egy and cth input parameters.\n\n Parameters\n ----------\n event_class : str\n Event class string (e.g. P8R2_SOURCE_V6).\n\n event_type : list\n\n egy : array_like\n Evaluation points in energy (MeV).\n\n cth : array_like\n Evaluation points in cosine of the incidence angle.\n\n \"\"\"\n irf = create_irf(event_class, event_type)\n irf.aeff().setPhiDependence(False)\n theta = np.degrees(np.arccos(cth))\n\n # Exposure Matrix\n # Dimensions are Etrue and incidence angle\n m = np.zeros((len(egy), len(cth)))\n\n for i, x in enumerate(egy):\n for j, y in enumerate(theta):\n m[i, j] = irf.aeff().value(x, y, 0.0)\n\n return m"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncalculating the exposure on a 2D grid of energy and incidence angle bin.", "response": "def calc_exp(skydir, ltc, event_class, event_types,\n egy, cth_bins, npts=None):\n \"\"\"Calculate the exposure on a 2D grid of energy and incidence angle.\n\n Parameters\n ----------\n npts : int \n Number of points by which to sample the response in each\n incidence angle bin. If None then npts will be automatically\n set such that incidence angle is sampled on intervals of <\n 0.05 in Cos(Theta).\n\n Returns\n -------\n exp : `~numpy.ndarray`\n 2D Array of exposures vs. 
energy and incidence angle.\n\n \"\"\"\n\n if npts is None:\n npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.025))\n\n exp = np.zeros((len(egy), len(cth_bins) - 1))\n cth_bins = utils.split_bin_edges(cth_bins, npts)\n cth = edge_to_center(cth_bins)\n ltw = ltc.get_skydir_lthist(skydir, cth_bins).reshape(-1, npts)\n for et in event_types:\n aeff = create_aeff(event_class, et, egy, cth)\n aeff = aeff.reshape(exp.shape + (npts,))\n exp += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)\n\n return exp"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncalculating the weighted response function.", "response": "def create_avg_rsp(rsp_fn, skydir, ltc, event_class, event_types, x,\n egy, cth_bins, npts=None):\n \"\"\"Calculate the weighted response function.\n \"\"\"\n if npts is None:\n npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05))\n\n wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1))\n exps = np.zeros((len(egy), len(cth_bins) - 1))\n\n cth_bins = utils.split_bin_edges(cth_bins, npts)\n cth = edge_to_center(cth_bins)\n ltw = ltc.get_skydir_lthist(skydir, cth_bins)\n ltw = ltw.reshape(-1, npts)\n\n for et in event_types:\n rsp = rsp_fn(event_class, et, x, egy, cth)\n aeff = create_aeff(event_class, et, egy, cth)\n rsp = rsp.reshape(wrsp.shape + (npts,))\n aeff = aeff.reshape(exps.shape + (npts,))\n wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] *\n ltw[np.newaxis, np.newaxis, :, :], axis=-1)\n exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)\n\n exps_inv = np.zeros_like(exps)\n exps_inv[exps > 0] = 1./exps[exps>0] \n wrsp *= exps_inv[np.newaxis, :, :]\n return wrsp"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create_avg_psf(skydir, ltc, event_class, event_types, dtheta,\n egy, cth_bins, npts=None):\n \"\"\"Generate model for exposure-weighted PSF averaged over incidence\n angle.\n\n Parameters\n ----------\n egy : `~numpy.ndarray`\n 
Energies in MeV.\n\n cth_bins : `~numpy.ndarray`\n Bin edges in cosine of the incidence angle.\n \"\"\"\n\n return create_avg_rsp(create_psf, skydir, ltc,\n event_class, event_types,\n dtheta, egy, cth_bins, npts)", "response": "Generate model for exposure - weighted PSF averaged over incidence\n angle."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create_avg_edisp(skydir, ltc, event_class, event_types, erec,\n egy, cth_bins, npts=None):\n \"\"\"Generate model for exposure-weighted DRM averaged over incidence\n angle.\n\n Parameters\n ----------\n egy : `~numpy.ndarray`\n True energies in MeV.\n\n cth_bins : `~numpy.ndarray`\n Bin edges in cosine of the incidence angle.\n \"\"\"\n return create_avg_rsp(create_edisp, skydir, ltc,\n event_class, event_types,\n erec, egy, cth_bins, npts)", "response": "Generate model for exposure - weighted DRM averaged over incidence\n angle."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate an exposure - and dispersion - weighted PSF model for a source .", "response": "def create_wtd_psf(skydir, ltc, event_class, event_types, dtheta,\n egy_bins, cth_bins, fn, nbin=64, npts=1):\n \"\"\"Create an exposure- and dispersion-weighted PSF model for a source\n with spectral parameterization ``fn``. The calculation performed\n by this method accounts for the influence of energy dispersion on\n the PSF.\n\n Parameters\n ----------\n dtheta : `~numpy.ndarray`\n\n egy_bins : `~numpy.ndarray`\n Bin edges in observed energy.\n\n cth_bins : `~numpy.ndarray`\n Bin edges in cosine of the true incidence angle.\n\n nbin : int\n Number of bins per decade in true energy.\n\n npts : int\n Number of points by which to oversample each energy bin.\n\n \"\"\"\n #npts = int(np.ceil(32. 
/ bins_per_dec(egy_bins)))\n egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))\n etrue_bins = 10**np.linspace(1.0, 6.5, nbin * 5.5 + 1)\n etrue = 10**utils.edge_to_center(np.log10(etrue_bins))\n\n psf = create_avg_psf(skydir, ltc, event_class, event_types, dtheta,\n etrue, cth_bins)\n drm = calc_drm(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, nbin=nbin)\n cnts = calc_counts(skydir, ltc, event_class, event_types,\n etrue_bins, cth_bins, fn)\n\n wts = drm * cnts[None, :, :]\n wts_norm = np.sum(wts, axis=1)\n wts_norm[wts_norm == 0] = 1.0\n wts = wts / wts_norm[:, None, :]\n wpsf = np.sum(wts[None, :, :, :] * psf[:, None, :, :], axis=2)\n wts = np.sum(wts[None, :, :, :], axis=2)\n\n if npts > 1:\n shape = (wpsf.shape[0], int(wpsf.shape[1] / npts), npts, wpsf.shape[2])\n wpsf = np.sum((wpsf * wts).reshape(shape), axis=2)\n shape = (wts.shape[0], int(wts.shape[1] / npts), npts, wts.shape[2])\n wpsf = wpsf / np.sum(wts.reshape(shape), axis=2)\n\n return wpsf"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef calc_drm(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, nbin=64):\n \"\"\"Calculate the detector response matrix.\"\"\"\n npts = int(np.ceil(128. 
/ bins_per_dec(egy_bins)))\n egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))\n\n etrue_bins = 10**np.linspace(1.0, 6.5, nbin * 5.5 + 1)\n egy = 10**utils.edge_to_center(np.log10(egy_bins))\n egy_width = utils.edge_to_width(egy_bins)\n etrue = 10**utils.edge_to_center(np.log10(etrue_bins))\n edisp = create_avg_edisp(skydir, ltc, event_class, event_types,\n egy, etrue, cth_bins)\n edisp = edisp * egy_width[:, None, None]\n edisp = sum_bins(edisp, 0, npts)\n return edisp", "response": "Calculate the detector response matrix."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef calc_counts(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, fn, npts=1):\n \"\"\"Calculate the expected counts vs. true energy and incidence angle\n for a source with spectral parameterization ``fn``.\n\n Parameters\n ----------\n skydir : `~astropy.coordinate.SkyCoord`\n\n ltc : `~fermipy.irfs.LTCube`\n\n egy_bins : `~numpy.ndarray`\n Bin edges in observed energy in MeV.\n\n cth_bins : `~numpy.ndarray`\n Bin edges in cosine of the true incidence angle.\n\n npts : int\n Number of points by which to oversample each energy bin.\n \"\"\"\n #npts = int(np.ceil(32. / bins_per_dec(egy_bins)))\n egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))\n exp = calc_exp(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins)\n dnde = fn.dnde(egy_bins)\n cnts = loglog_quad(egy_bins, exp * dnde[:, None], 0)\n cnts = sum_bins(cnts, 0, npts)\n return cnts", "response": "Calculate the expected counts vs. true energy and incidence angle for a source with spectral parameterization fn."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef calc_counts_edisp(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, fn, nbin=16, npts=1):\n \"\"\"Calculate the expected counts vs. 
observed energy and true\n incidence angle for a source with spectral parameterization ``fn``.\n\n Parameters\n ----------\n skydir : `~astropy.coordinate.SkyCoord`\n\n ltc : `~fermipy.irfs.LTCube`\n\n egy_bins : `~numpy.ndarray`\n Bin edges in observed energy in MeV.\n\n cth_bins : `~numpy.ndarray`\n Bin edges in cosine of the true incidence angle.\n\n nbin : int \n Number of points per decade with which to sample true energy.\n \n npts : int\n Number of points by which to oversample each reconstructed energy bin.\n\n \"\"\"\n #npts = int(np.ceil(32. / bins_per_dec(egy_bins)))\n\n # Split energy bins\n egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))\n etrue_bins = 10**np.linspace(1.0, 6.5, nbin * 5.5 + 1)\n drm = calc_drm(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, nbin=nbin)\n cnts_etrue = calc_counts(skydir, ltc, event_class, event_types,\n etrue_bins, cth_bins, fn)\n\n cnts = np.sum(cnts_etrue[None, :, :] * drm[:, :, :], axis=1)\n cnts = sum_bins(cnts, 0, npts)\n return cnts", "response": "Calculate the expected counts vs. 
observed energy and true incidence angle for a source with spectral parameterization fn."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncalculate the effective exposure.", "response": "def calc_wtd_exp(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, fn, nbin=16):\n \"\"\"Calculate the effective exposure.\n\n Parameters\n ----------\n skydir : `~astropy.coordinates.SkyCoord`\n\n ltc : `~fermipy.irfs.LTCube`\n\n nbin : int \n Number of points per decade with which to sample true energy.\n\n \"\"\"\n cnts = calc_counts_edisp(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, fn, nbin=nbin)\n flux = fn.flux(egy_bins[:-1], egy_bins[1:])\n return cnts / flux[:, None]"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncreate an exposure map from a livetime cube.", "response": "def create(cls, ltc, event_class, event_types, ebins):\n \"\"\"Create an exposure map from a livetime cube. This method will\n generate an exposure map with the same geometry as the\n livetime cube (nside, etc.).\n\n Parameters\n ----------\n ltc : `~fermipy.irfs.LTCube`\n Livetime cube object.\n\n event_class : str\n Event class string.\n\n event_types : list\n List of event type strings, e.g. 
['FRONT','BACK'].\n\n ebins : `~numpy.ndarray`\n Energy bin edges in MeV.\n\n \"\"\"\n\n evals = np.sqrt(ebins[1:] * ebins[:-1])\n exp = np.zeros((len(evals), ltc.hpx.npix))\n for et in event_types:\n aeff = create_aeff(event_class, et, evals, ltc.costh_center)\n exp += np.sum(aeff.T[:, :, np.newaxis] *\n ltc.data[:, np.newaxis, :], axis=0)\n\n hpx = HPX(ltc.hpx.nside, ltc.hpx.nest,\n ltc.hpx.coordsys, ebins=ebins)\n return cls(exp, hpx)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nevaluating the PSF at the given energy bin index.", "response": "def eval(self, ebin, dtheta, scale_fn=None):\n \"\"\"Evaluate the PSF at the given energy bin index.\n\n Parameters\n ----------\n ebin : int\n Index of energy bin.\n\n dtheta : array_like\n Array of angular separations in degrees.\n\n scale_fn : callable \n Function that evaluates the PSF scaling function.\n Argument is energy in MeV.\n \"\"\"\n\n if scale_fn is None and self.scale_fn is not None:\n scale_fn = self.scale_fn\n\n if scale_fn is None:\n scale_factor = 1.0\n else:\n dtheta = dtheta / scale_fn(self.energies[ebin])\n scale_factor = 1. / scale_fn(self.energies[ebin])**2\n\n vals = 10**np.interp(dtheta, self.dtheta, np.log10(self.val[:, ebin]))\n return vals * scale_factor"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef interp(self, energies, dtheta, scale_fn=None):\n\n if scale_fn is None and self.scale_fn:\n scale_fn = self.scale_fn\n\n log_energies = np.log10(energies)\n\n shape = (energies * dtheta).shape\n scale_factor = np.ones(shape)\n\n if scale_fn is not None:\n dtheta = dtheta / scale_fn(energies)\n scale_factor = 1. 
/ scale_fn(energies)**2\n\n vals = np.exp(self._psf_fn((dtheta, log_energies)))\n return vals * scale_factor", "response": "Evaluate the PSF model at an array of energies and angularicalical separations in degrees."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nevaluate the bin - averaged PSF model over the energy bins egy_bins and dtheta.", "response": "def interp_bin(self, egy_bins, dtheta, scale_fn=None):\n \"\"\"Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.\n\n Parameters\n ----------\n egy_bins : array_like\n Energy bin edges in MeV.\n\n dtheta : array_like\n Array of angular separations in degrees.\n\n scale_fn : callable \n Function that evaluates the PSF scaling function.\n Argument is energy in MeV.\n \"\"\"\n\n npts = 4\n egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))\n egy = np.exp(utils.edge_to_center(np.log(egy_bins)))\n log_energies = np.log10(egy)\n\n vals = self.interp(egy[None, :], dtheta[:, None],\n scale_fn=scale_fn)\n wts = np.exp(self._wts_fn((log_energies,)))\n wts = wts.reshape((1,) + wts.shape)\n vals = np.sum(\n (vals * wts).reshape((vals.shape[0], int(vals.shape[1] / npts), npts)), axis=2)\n vals /= np.sum(wts.reshape(wts.shape[0],\n int(wts.shape[1] / npts), npts), axis=2)\n return vals"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef containment_angle(self, energies=None, fraction=0.68, scale_fn=None):\n\n if energies is None:\n energies = self.energies\n\n vals = self.interp(energies[np.newaxis, :], self.dtheta[:, np.newaxis],\n scale_fn=scale_fn)\n dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))\n return self._calc_containment(dtheta, vals, fraction)", "response": "Evaluate the PSF containment angle at a sequence of energies."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef containment_angle_bin(self, 
egy_bins, fraction=0.68, scale_fn=None):\n\n vals = self.interp_bin(egy_bins, self.dtheta, scale_fn=scale_fn)\n dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))\n return self._calc_containment(dtheta, vals, fraction)", "response": "Evaluate the PSF containment angle averaged over energy bins."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating a new instance of the class with the given parameters.", "response": "def create(cls, skydir, ltc, event_class, event_types, energies, cth_bins=None,\n ndtheta=500, use_edisp=False, fn=None, nbin=64):\n \"\"\"Create a PSFModel object. This class can be used to evaluate the\n exposure-weighted PSF for a source with a given observing\n profile and energy distribution.\n\n Parameters\n ----------\n skydir : `~astropy.coordinates.SkyCoord`\n\n ltc : `~fermipy.irfs.LTCube`\n\n energies : `~numpy.ndarray`\n Grid of energies at which the PSF will be pre-computed.\n\n cth_bins : `~numpy.ndarray`\n Bin edges in cosine of the inclination angle.\n\n use_edisp : bool\n Generate the PSF model accounting for the influence of\n energy dispersion.\n\n fn : `~fermipy.spectrum.SpectralFunction`\n Model for the spectral energy distribution of the source.\n\n \"\"\"\n\n if isinstance(event_types, int):\n event_types = bitmask_to_bits(event_types)\n\n if fn is None:\n fn = spectrum.PowerLaw([1E-13, -2.0])\n\n dtheta = np.logspace(-4, 1.75, ndtheta)\n dtheta = np.insert(dtheta, 0, [0])\n log_energies = np.log10(energies)\n egy_bins = 10**utils.center_to_edge(log_energies)\n\n if cth_bins is None:\n cth_bins = np.array([0.2, 1.0])\n\n if use_edisp:\n psf = create_wtd_psf(skydir, ltc, event_class, event_types,\n dtheta, egy_bins, cth_bins, fn, nbin=nbin)\n wts = calc_counts_edisp(skydir, ltc, event_class, event_types,\n egy_bins, cth_bins, fn, nbin=nbin)\n else:\n psf = create_avg_psf(skydir, ltc, event_class, event_types,\n dtheta, energies, cth_bins)\n wts = calc_counts(skydir, ltc, 
event_class, event_types,\n egy_bins, cth_bins, fn)\n\n exp = calc_exp(skydir, ltc, event_class, event_types,\n energies, cth_bins)\n\n return cls(dtheta, energies, cth_bins, np.squeeze(exp), np.squeeze(psf),\n np.squeeze(wts))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef remove_file(filepath, dry_run=False):\n if dry_run:\n sys.stdout.write(\"rm %s\\n\" % filepath)\n else:\n try:\n os.remove(filepath)\n except OSError:\n pass", "response": "Remove the file at filepath Catches exception"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nremove log file and files created by failed jobs.", "response": "def clean_job(logfile, outfiles, dry_run=False):\n \"\"\"Removes log file and files created by failed jobs.\n\n If dry_run is True, print name of files to be removed, but do not remove them.\n \"\"\"\n remove_file(logfile, dry_run)\n for outfile in outfiles.values():\n remove_file(outfile, dry_run)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef check_log(logfile, exited='Exited with exit code',\n successful='Successfully completed'):\n \"\"\"Check a log file to determine status of LSF job\n\n Often logfile doesn't exist because the job hasn't begun\n to run. 
It is unclear what you want to do in that case...\n\n Parameters\n ----------\n\n logfile : str\n String with path to logfile\n\n exited : str\n Value to check for in existing logfile for exit with failure\n\n successful : str\n Value to check for in existing logfile for success\n\n Returns str, one of 'Pending', 'Running', 'Done', 'Failed'\n \"\"\"\n if not os.path.exists(logfile):\n return JobStatus.ready\n if exited in open(logfile).read():\n return JobStatus.failed\n elif successful in open(logfile).read():\n return JobStatus.done\n return JobStatus.running", "response": "Check if a log file exists and determine if it is ready to run or failed."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nchecking the status of a specfic job", "response": "def check_job(cls, job_details):\n \"\"\" Check the status of a specfic job \"\"\"\n return check_log(job_details.logfile, cls.string_exited, cls.string_successful)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):\n raise NotImplementedError(\"SysInterface.dispatch_job_hook\")", "response": "Dispatch a single job"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nfunctions to dispatch a single job", "response": "def dispatch_job(self, link, key, job_archive, stream=sys.stdout):\n \"\"\"Function to dispatch a single job\n\n Parameters\n ----------\n\n link : `Link`\n Link object that sendes the job\n\n key : str\n Key used to identify this particular job\n\n job_archive : `JobArchive`\n Archive used to keep track of jobs\n\n Returns `JobDetails` object\n \"\"\"\n try:\n job_details = link.jobs[key]\n except KeyError:\n print(key, link.jobs)\n job_config = job_details.job_config\n link.update_args(job_config)\n logfile = job_config['logfile']\n try:\n self.dispatch_job_hook(link, key, job_config, logfile, stream)\n 
job_details.status = JobStatus.running\n except IOError:\n job_details.status = JobStatus.failed\n\n if job_archive is not None:\n job_archive.register_job(job_details)\n return job_details"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef submit_jobs(self, link, job_dict=None, job_archive=None, stream=sys.stdout):\n failed = False\n if job_dict is None:\n job_dict = link.jobs\n\n for job_key, job_details in sorted(job_dict.items()):\n job_config = job_details.job_config\n # clean failed jobs\n if job_details.status == JobStatus.failed:\n clean_job(job_details.logfile,\n job_details.outfiles, self._dry_run)\n # clean_job(job_details.logfile, {}, self._dry_run)\n job_config['logfile'] = job_details.logfile\n new_job_details = self.dispatch_job(\n link, job_key, job_archive, stream)\n if new_job_details.status == JobStatus.failed:\n failed = True\n clean_job(new_job_details.logfile,\n new_job_details.outfiles, self._dry_run)\n link.jobs[job_key] = new_job_details\n if failed:\n return JobStatus.failed\n return JobStatus.done", "response": "Submit all of the items job_dict as input."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncleans up all the jobs associated with this link.", "response": "def clean_jobs(self, link, job_dict=None, clean_all=False):\n \"\"\" Clean up all the jobs associated with this link.\n\n Returns a `JobStatus` enum\n \"\"\"\n failed = False\n if job_dict is None:\n job_dict = link.jobs\n\n for job_details in job_dict.values():\n # clean failed jobs\n if job_details.status == JobStatus.failed or clean_all:\n # clean_job(job_details.logfile, job_details.outfiles, self._dry_run)\n clean_job(job_details.logfile, {}, self._dry_run)\n job_details.status = JobStatus.ready\n if failed:\n return JobStatus.failed\n return JobStatus.done"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef 
get_function_spec(name):\n if not hasattr(get_function_spec, 'fndict'):\n modelfile = os.path.join('$FERMIPY_ROOT',\n 'data', 'models.yaml')\n modelfile = os.path.expandvars(modelfile)\n get_function_spec.fndict = yaml.load(open(modelfile))\n\n if not name in get_function_spec.fndict.keys():\n raise Exception('Invalid Function Name: %s' % name)\n\n return get_function_spec.fndict[name]", "response": "Returns a dictionary with the specification of a function name and default values."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_spatial_type(spatial_model):\n\n if spatial_model in ['SkyDirFunction', 'PointSource',\n 'Gaussian']:\n return 'SkyDirFunction'\n elif spatial_model in ['SpatialMap']:\n return 'SpatialMap'\n elif spatial_model in ['RadialGaussian', 'RadialDisk']:\n try:\n import pyLikelihood\n if hasattr(pyLikelihood, 'RadialGaussian'):\n return spatial_model\n else:\n return 'SpatialMap'\n except Exception:\n return spatial_model\n else:\n return spatial_model", "response": "Translate a spatial model string to a spatial type."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a dictionary for the parameters of a function.", "response": "def create_pars_from_dict(name, pars_dict, rescale=True, update_bounds=False):\n \"\"\"Create a dictionary for the parameters of a function.\n\n Parameters\n ----------\n name : str\n Name of the function.\n\n pars_dict : dict \n Existing parameter dict that will be merged with the\n default dictionary created by this method.\n\n rescale : bool\n Rescale parameter values.\n\n \"\"\"\n o = get_function_defaults(name)\n pars_dict = pars_dict.copy()\n\n for k in o.keys():\n\n if not k in pars_dict:\n continue\n\n v = pars_dict[k]\n\n if not isinstance(v, dict):\n v = {'name': k, 'value': v}\n\n o[k].update(v)\n\n kw = dict(update_bounds=update_bounds,\n rescale=rescale)\n\n if 'min' in v or 'max' in v:\n 
kw['update_bounds'] = False\n\n if 'scale' in v:\n kw['rescale'] = False\n\n o[k] = make_parameter_dict(o[k], **kw)\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef make_parameter_dict(pdict, fixed_par=False, rescale=True,\n update_bounds=False):\n \"\"\"\n Update a parameter dictionary. This function will automatically\n set the parameter scale and bounds if they are not defined.\n Bounds are also adjusted to ensure that they encompass the\n parameter value.\n \"\"\"\n o = copy.deepcopy(pdict)\n o.setdefault('scale', 1.0)\n\n if rescale:\n value, scale = utils.scale_parameter(o['value'] * o['scale'])\n o['value'] = np.abs(value) * np.sign(o['value'])\n o['scale'] = np.abs(scale) * np.sign(o['scale'])\n if 'error' in o:\n o['error'] /= np.abs(scale)\n\n if update_bounds:\n o['min'] = o['value'] * 1E-3\n o['max'] = o['value'] * 1E3\n\n if fixed_par:\n o['min'] = o['value']\n o['max'] = o['value']\n\n if float(o['min']) > float(o['value']):\n o['min'] = o['value']\n\n if float(o['max']) < float(o['value']):\n o['max'] = o['value']\n\n return o", "response": "This function creates a parameter dictionary."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncast the bool and float elements of a parameters dict to the appropriate python types.", "response": "def cast_pars_dict(pars_dict):\n \"\"\"Cast the bool and float elements of a parameters dict to\n the appropriate python types.\n \"\"\"\n\n o = {}\n\n for pname, pdict in pars_dict.items():\n\n o[pname] = {}\n\n for k, v in pdict.items():\n\n if k == 'free':\n o[pname][k] = bool(int(v))\n elif k == 'name':\n o[pname][k] = v\n else:\n o[pname][k] = float(v)\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ngather all the HDUs from a list of files", "response": "def do_gather(flist):\n \"\"\" Gather all the HDUs from a list of files\"\"\"\n hlist = []\n nskip = 3\n for 
fname in flist:\n fin = fits.open(fname)\n if len(hlist) == 0:\n if fin[1].name == 'SKYMAP':\n nskip = 4\n start = 0\n else:\n start = nskip\n for h in fin[start:]:\n hlist.append(h)\n hdulistout = fits.HDUList(hlist)\n return hdulistout"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef n_waiting(self):\n return self._counters[JobStatus.no_job] +\\\n self._counters[JobStatus.unknown] +\\\n self._counters[JobStatus.not_ready] +\\\n self._counters[JobStatus.ready]", "response": "Return the number of jobs in various waiting states"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef n_failed(self):\n return self._counters[JobStatus.failed] + self._counters[JobStatus.partial_failed]", "response": "Return the number of failed jobs"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_status(self):\n if self.n_total == 0:\n return JobStatus.no_job\n elif self.n_done == self.n_total:\n return JobStatus.done\n elif self.n_failed > 0:\n # If more that a quater of the jobs fail, fail the whole thing\n if self.n_failed > self.n_total / 4.:\n return JobStatus.failed\n return JobStatus.partial_failed\n elif self.n_running > 0:\n return JobStatus.running\n elif self.n_pending > 0:\n return JobStatus.pending\n return JobStatus.ready", "response": "Return an overall status based on the number of jobs in various states."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nbuilding and return an astropy. table. 
Table to store JobDetails", "response": "def make_tables(job_dict):\n \"\"\"Build and return an `astropy.table.Table' to store `JobDetails`\"\"\"\n col_dbkey = Column(name='dbkey', dtype=int)\n col_jobname = Column(name='jobname', dtype='S64')\n col_jobkey = Column(name='jobkey', dtype='S64')\n col_appname = Column(name='appname', dtype='S64')\n col_logfile = Column(name='logfile', dtype='S256')\n col_job_config = Column(name='job_config', dtype='S1024')\n col_timestamp = Column(name='timestamp', dtype=int)\n col_infile_refs = Column(name='infile_refs', dtype=int, shape=(2))\n col_outfile_refs = Column(name='outfile_refs', dtype=int, shape=(2))\n col_rmfile_refs = Column(name='rmfile_refs', dtype=int, shape=(2))\n col_intfile_refs = Column(name='intfile_refs', dtype=int, shape=(2))\n col_status = Column(name='status', dtype=int)\n columns = [col_dbkey, col_jobname, col_jobkey, col_appname,\n col_logfile, col_job_config, col_timestamp,\n col_infile_refs, col_outfile_refs,\n col_rmfile_refs, col_intfile_refs,\n col_status]\n\n table = Table(data=columns)\n\n col_file_ids = Column(name='file_id', dtype=int)\n table_ids = Table(data=[col_file_ids])\n\n for val in job_dict.values():\n val.append_to_tables(table, table_ids)\n return table, table_ids"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nfilling the file ids arrays from the file lists and the status of the job.", "response": "def get_file_ids(self, file_archive, creator=None, status=FileStatus.no_file):\n \"\"\"Fill the file id arrays from the file lists\n\n Parameters\n ----------\n\n file_archive : `FileArchive`\n Used to look up file ids\n\n creator : int\n A unique key for the job that created these file\n\n status : `FileStatus`\n Enumeration giving current status thse files\n \"\"\"\n file_dict = copy.deepcopy(self.file_dict)\n if self.sub_file_dict is not None:\n file_dict.update(self.sub_file_dict.file_dict)\n\n infiles = file_dict.input_files\n outfiles = 
file_dict.output_files\n rmfiles = file_dict.temp_files\n int_files = file_dict.internal_files\n\n if self.infile_ids is None:\n if infiles is not None:\n self.infile_ids = np.zeros((len(infiles)), int)\n filelist = file_archive.get_file_ids(\n infiles, creator, FileStatus.expected, file_dict)\n JobDetails._fill_array_from_list(filelist, self.infile_ids)\n else:\n self.infile_ids = np.zeros((0), int)\n if self.outfile_ids is None:\n if outfiles is not None:\n self.outfile_ids = np.zeros((len(outfiles)), int)\n filelist = file_archive.get_file_ids(\n outfiles, creator, status, file_dict)\n JobDetails._fill_array_from_list(filelist, self.outfile_ids)\n else:\n self.outfile_ids = np.zeros((0), int)\n if self.rmfile_ids is None:\n if rmfiles is not None:\n self.rmfile_ids = np.zeros((len(rmfiles)), int)\n filelist = file_archive.get_file_ids(rmfiles)\n JobDetails._fill_array_from_list(filelist, self.rmfile_ids)\n else:\n self.rmfile_ids = np.zeros((0), int)\n if self.intfile_ids is None:\n if int_files is not None:\n self.intfile_ids = np.zeros((len(int_files)), int)\n filelist = file_archive.get_file_ids(\n int_files, creator, status)\n JobDetails._fill_array_from_list(filelist, self.intfile_ids)\n else:\n self.intfile_ids = np.zeros((0), int)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_file_paths(self, file_archive, file_id_array):\n full_list = []\n status_dict = {}\n full_list += file_archive.get_file_paths(\n file_id_array[self.infile_ids])\n full_list += file_archive.get_file_paths(\n file_id_array[self.outfile_ids])\n full_list += file_archive.get_file_paths(\n file_id_array[self.rmfile_ids])\n full_list += file_archive.get_file_paths(\n file_id_array[self.intfile_ids])\n for filepath in full_list:\n handle = file_archive.get_handle(filepath)\n status_dict[filepath] = handle.status\n if self.file_dict is None:\n self.file_dict = FileDict()\n self.file_dict.update(status_dict)", 
"response": "Get the full paths of the files used by this object from the id arrays containing the file ids."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nfill an array from a list", "response": "def _fill_array_from_list(the_list, the_array):\n \"\"\"Fill an `array` from a `list`\"\"\"\n for i, val in enumerate(the_list):\n the_array[i] = val\n return the_array"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nbuilds a dictionary map int to JobDetails", "response": "def make_dict(cls, table):\n \"\"\"Build a dictionary map int to `JobDetails` from an `astropy.table.Table`\"\"\"\n ret_dict = {}\n for row in table:\n job_details = cls.create_from_row(row)\n ret_dict[job_details.dbkey] = job_details\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncreate a JobDetails object from an astropy. table. row. Row object.", "response": "def create_from_row(cls, table_row):\n \"\"\"Create a `JobDetails` from an `astropy.table.row.Row` \"\"\"\n kwargs = {}\n for key in table_row.colnames:\n kwargs[key] = table_row[key]\n\n infile_refs = kwargs.pop('infile_refs')\n outfile_refs = kwargs.pop('outfile_refs')\n rmfile_refs = kwargs.pop('rmfile_refs')\n intfile_refs = kwargs.pop('intfile_refs')\n\n kwargs['infile_ids'] = np.arange(infile_refs[0], infile_refs[1])\n kwargs['outfile_ids'] = np.arange(outfile_refs[0], outfile_refs[1])\n kwargs['rmfile_ids'] = np.arange(rmfile_refs[0], rmfile_refs[1])\n kwargs['intfile_ids'] = np.arange(intfile_refs[0], intfile_refs[1])\n return cls(**kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nappending this instance to a table.", "response": "def append_to_tables(self, table, table_ids):\n \"\"\"Add this instance as a row on a `astropy.table.Table` \"\"\"\n infile_refs = np.zeros((2), int)\n outfile_refs = np.zeros((2), int)\n rmfile_refs = np.zeros((2), int)\n intfile_refs = np.zeros((2), int)\n 
f_ptr = len(table_ids['file_id'])\n infile_refs[0] = f_ptr\n if self.infile_ids is not None:\n for fid in self.infile_ids:\n table_ids.add_row(dict(file_id=fid))\n f_ptr += 1\n infile_refs[1] = f_ptr\n outfile_refs[0] = f_ptr\n if self.outfile_ids is not None:\n for fid in self.outfile_ids:\n table_ids.add_row(dict(file_id=fid))\n f_ptr += 1\n outfile_refs[1] = f_ptr\n rmfile_refs[0] = f_ptr\n if self.rmfile_ids is not None:\n for fid in self.rmfile_ids:\n table_ids.add_row(dict(file_id=fid))\n f_ptr += 1\n rmfile_refs[1] = f_ptr\n intfile_refs[0] = f_ptr\n if self.intfile_ids is not None:\n for fid in self.intfile_ids:\n table_ids.add_row(dict(file_id=fid))\n f_ptr += 1\n intfile_refs[1] = f_ptr\n\n table.add_row(dict(dbkey=self.dbkey,\n jobname=self.jobname,\n jobkey=self.jobkey,\n appname=self.appname,\n logfile=self.logfile,\n job_config=str(self.job_config),\n timestamp=self.timestamp,\n infile_refs=infile_refs,\n outfile_refs=outfile_refs,\n rmfile_refs=rmfile_refs,\n intfile_refs=intfile_refs,\n status=self.status))"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nupdates the values of a row in a table.", "response": "def update_table_row(self, table, row_idx):\n \"\"\"Add this instance as a row on a `astropy.table.Table` \"\"\"\n try:\n table[row_idx]['timestamp'] = self.timestamp\n table[row_idx]['status'] = self.status\n except IndexError:\n print(\"Index error\", len(table), row_idx)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef check_status_logfile(self, checker_func):\n self.status = checker_func(self.logfile)\n return self.status", "response": "Check on the status of this particular job using the logfile"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfills the cache from the table.", "response": "def _fill_cache(self):\n \"\"\"Fill the cache from the `astropy.table.Table`\"\"\"\n for irow in 
range(len(self._table)):\n job_details = self.make_job_details(irow)\n self._cache[job_details.fullkey] = job_details"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _read_table_file(self, table_file):\n self._table_file = table_file\n if os.path.exists(self._table_file):\n self._table = Table.read(self._table_file, hdu='JOB_ARCHIVE')\n self._table_ids = Table.read(self._table_file, hdu='FILE_IDS')\n else:\n self._table, self._table_ids = JobDetails.make_tables({})\n self._table_id_array = self._table_ids['file_id'].data\n self._fill_cache()", "response": "Read an astropy. table. Table from table_file to set up the JobArchive"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncreates a JobDetails object from an astropy. table. row. Row", "response": "def make_job_details(self, row_idx):\n \"\"\"Create a `JobDetails` from an `astropy.table.row.Row` \"\"\"\n row = self._table[row_idx]\n job_details = JobDetails.create_from_row(row)\n job_details.get_file_paths(self._file_archive, self._table_id_array)\n self._cache[job_details.fullkey] = job_details\n return job_details"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_details(self, jobname, jobkey):\n fullkey = JobDetails.make_fullkey(jobname, jobkey)\n return self._cache[fullkey]", "response": "Get the JobDetails associated to a particular job instance"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nregister a job in this JobArchive.", "response": "def register_job(self, job_details):\n \"\"\"Register a job in this `JobArchive` \"\"\"\n # check to see if the job already exists\n try:\n job_details_old = self.get_details(job_details.jobname,\n job_details.jobkey)\n if job_details_old.status <= JobStatus.running:\n job_details_old.status = job_details.status\n job_details_old.update_table_row(\n self._table, 
job_details_old.dbkey - 1)\n job_details = job_details_old\n except KeyError:\n job_details.dbkey = len(self._table) + 1\n job_details.get_file_ids(\n self._file_archive, creator=job_details.dbkey)\n job_details.append_to_tables(self._table, self._table_ids)\n self._table_id_array = self._table_ids['file_id'].data\n self._cache[job_details.fullkey] = job_details\n return job_details"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nregisters a bunch of jobs in this archive", "response": "def register_jobs(self, job_dict):\n \"\"\"Register a bunch of jobs in this archive\"\"\"\n njobs = len(job_dict)\n sys.stdout.write(\"Registering %i total jobs: \" % njobs)\n for i, job_details in enumerate(job_dict.values()):\n if i % 10 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n self.register_job(job_details)\n sys.stdout.write('!\\n')"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nregister a job in the JobArchive from a Link object", "response": "def register_job_from_link(self, link, key, **kwargs):\n \"\"\"Register a job in the `JobArchive` from a `Link` object \"\"\"\n job_config = kwargs.get('job_config', None)\n if job_config is None:\n job_config = link.args\n status = kwargs.get('status', JobStatus.unknown)\n job_details = JobDetails(jobname=link.linkname,\n jobkey=key,\n appname=link.appname,\n logfile=kwargs.get('logfile'),\n jobconfig=job_config,\n timestamp=get_timestamp(),\n file_dict=copy.deepcopy(link.files),\n sub_file_dict=copy.deepcopy(link.sub_files),\n status=status)\n self.register_job(job_details)\n return job_details"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef update_job(self, job_details):\n other = self.get_details(job_details.jobname,\n job_details.jobkey)\n other.timestamp = job_details.timestamp\n other.status = job_details.status\n other.update_table_row(self._table, other.dbkey - 1)\n return other", 
"response": "Update a job in the JobArchive"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmarks all jobs that match a mask as removed", "response": "def remove_jobs(self, mask):\n \"\"\"Mark all jobs that match a mask as 'removed' \"\"\"\n jobnames = self.table[mask]['jobname']\n jobkey = self.table[mask]['jobkey']\n self.table[mask]['status'] = JobStatus.removed\n for jobname, jobkey in zip(jobnames, jobkey):\n fullkey = JobDetails.make_fullkey(jobname, jobkey)\n self._cache.pop(fullkey).status = JobStatus.removed\n self.write_table_file()"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef build_temp_job_archive(cls):\n try:\n os.unlink('job_archive_temp.fits')\n os.unlink('file_archive_temp.fits')\n except OSError:\n pass\n\n cls._archive = cls(job_archive_table='job_archive_temp.fits',\n file_archive_table='file_archive_temp.fits',\n base_path=os.path.abspath('.') + '/')\n return cls._archive", "response": "Build and return a JobArchive using the locations of\n persistent files."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef write_table_file(self, job_table_file=None, file_table_file=None):\n if self._table is None:\n raise RuntimeError(\"No table to write\")\n if self._table_ids is None:\n raise RuntimeError(\"No ID table to write\")\n if job_table_file is not None:\n self._table_file = job_table_file\n if self._table_file is None:\n raise RuntimeError(\"No output file specified for table\")\n write_tables_to_fits(self._table_file, [self._table, self._table_ids], clobber=True,\n namelist=['JOB_ARCHIVE', 'FILE_IDS'])\n self._file_archive.write_table_file(file_table_file)", "response": "Write the table to the file_table_file"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate the status of all the jobs in the archive", "response": "def 
update_job_status(self, checker_func):\n \"\"\"Update the status of all the jobs in the archive\"\"\"\n njobs = len(self.cache.keys())\n status_vect = np.zeros((8), int)\n sys.stdout.write(\"Updating status of %i jobs: \" % njobs)\n sys.stdout.flush()\n for i, key in enumerate(self.cache.keys()):\n if i % 200 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n job_details = self.cache[key]\n if job_details.status in [JobStatus.pending, JobStatus.running]:\n if checker_func:\n job_details.check_status_logfile(checker_func)\n job_details.update_table_row(self._table, job_details.dbkey - 1)\n status_vect[job_details.status] += 1\n\n sys.stdout.write(\"!\\n\")\n sys.stdout.flush()\n sys.stdout.write(\"Summary:\\n\")\n sys.stdout.write(\" Unknown: %i\\n\" % status_vect[JobStatus.unknown])\n sys.stdout.write(\" Not Ready: %i\\n\" %\n status_vect[JobStatus.not_ready])\n sys.stdout.write(\" Ready: %i\\n\" % status_vect[JobStatus.ready])\n sys.stdout.write(\" Pending: %i\\n\" % status_vect[JobStatus.pending])\n sys.stdout.write(\" Running: %i\\n\" % status_vect[JobStatus.running])\n sys.stdout.write(\" Done: %i\\n\" % status_vect[JobStatus.done])\n sys.stdout.write(\" Failed: %i\\n\" % status_vect[JobStatus.failed])\n sys.stdout.write(\" Partial: %i\\n\" %\n status_vect[JobStatus.partial_failed])"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the singleton JobArchive instance if needed", "response": "def build_archive(cls, **kwargs):\n \"\"\"Return the singleton `JobArchive` instance, building it if needed \"\"\"\n if cls._archive is None:\n cls._archive = cls(**kwargs)\n return cls._archive"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ngets the elapsed time.", "response": "def elapsed_time(self):\n \"\"\"Get the elapsed time.\"\"\"\n\n # Timer is running\n if self._t0 is not None:\n return self._time + self._get_time()\n else:\n return self._time"} {"SOURCE": "codesearchnet", 
"instruction": "Write a Python 3 function for\nconstructing and return a roi_model. Source object", "response": "def make_spatialmap_source(name, Spatial_Filename, spectrum):\n \"\"\"Construct and return a `fermipy.roi_model.Source` object\n \"\"\"\n data = dict(Spatial_Filename=Spatial_Filename, \n ra=0.0, dec=0.0,\n SpatialType='SpatialMap',\n Source_Name=name)\n if spectrum is not None:\n data.update(spectrum)\n\n return roi_model.Source(name, data)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef make_mapcube_source(name, Spatial_Filename, spectrum):\n data = dict(Spatial_Filename=Spatial_Filename)\n if spectrum is not None:\n data.update(spectrum)\n\n return roi_model.MapCubeSource(name, data)", "response": "Construct and return a fermipy. roi_model. MapCubeSource object"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nconstructing and return an Isoropic Source object", "response": "def make_isotropic_source(name, Spectrum_Filename, spectrum):\n \"\"\"Construct and return a `fermipy.roi_model.IsoSource` object\n \"\"\"\n data = dict(Spectrum_Filename=Spectrum_Filename)\n if spectrum is not None:\n data.update(spectrum)\n\n return roi_model.IsoSource(name, data)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nconstructs and return a fermipy. roi_model. 
CompositeSource object", "response": "def make_composite_source(name, spectrum):\n \"\"\"Construct and return a `fermipy.roi_model.CompositeSource` object\n \"\"\"\n data = dict(SpatialType='CompositeSource',\n SpatialModel='CompositeSource',\n SourceType='CompositeSource')\n if spectrum is not None:\n data.update(spectrum)\n return roi_model.CompositeSource(name, data)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef make_catalog_sources(catalog_roi_model, source_names):\n sources = {}\n for source_name in source_names:\n sources[source_name] = catalog_roi_model[source_name]\n return sources", "response": "Construct and return a dictionary of sources that are a subset of sources\n in catalog_roi_model."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nmaking dictionary mapping component keys to a source or set of sources", "response": "def make_sources(comp_key, comp_dict):\n \"\"\"Make dictionary mapping component keys to a source\n or set of sources\n\n Parameters\n ----------\n\n comp_key : str\n Key used to access sources\n\n comp_dict : dict\n Information used to build sources\n\n return `OrderedDict` maping comp_key to `fermipy.roi_model.Source`\n \"\"\"\n srcdict = OrderedDict()\n try:\n comp_info = comp_dict.info\n except AttributeError:\n comp_info = comp_dict\n try:\n spectrum = comp_dict.spectrum\n except AttributeError:\n spectrum = None\n\n model_type = comp_info.model_type\n if model_type == 'PointSource':\n srcdict[comp_key] = make_point_source(comp_info.source_name,\n comp_info.src_dict)\n elif model_type == 'SpatialMap':\n srcdict[comp_key] = make_spatialmap_source(comp_info.source_name,\n comp_info.Spatial_Filename,\n spectrum)\n elif model_type == 'MapCubeSource':\n srcdict[comp_key] = make_mapcube_source(comp_info.source_name,\n comp_info.Spatial_Filename,\n spectrum)\n elif model_type == 'IsoSource':\n srcdict[comp_key] = 
make_isotropic_source(comp_info.source_name,\n comp_info.Spectral_Filename,\n spectrum)\n elif model_type == 'CompositeSource':\n srcdict[comp_key] = make_composite_source(comp_info.source_name,\n spectrum)\n elif model_type == 'CatalogSources':\n srcdict.update(make_catalog_sources(comp_info.roi_model,\n comp_info.source_names))\n else:\n raise ValueError(\"Unrecognized model_type %s\" % model_type)\n return srcdict"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef add_sources(self, source_info_dict):\n self._source_info_dict.update(source_info_dict)\n for key, value in source_info_dict.items():\n self._sources.update(make_sources(key, value))", "response": "Add all of the sources in source_info_dict to this factory s _sources list."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef build_catalog(**kwargs):\n catalog_type = kwargs.get('catalog_type')\n catalog_file = kwargs.get('catalog_file')\n catalog_extdir = kwargs.get('catalog_extdir')\n if catalog_type == '2FHL':\n return catalog.Catalog2FHL(fitsfile=catalog_file, extdir=catalog_extdir)\n elif catalog_type == '3FGL':\n return catalog.Catalog3FGL(fitsfile=catalog_file, extdir=catalog_extdir)\n elif catalog_type == '4FGLP':\n return catalog.Catalog4FGLP(fitsfile=catalog_file, extdir=catalog_extdir)\n elif catalog_type == 'FL8Y':\n return catalog.CatalogFL8Y(fitsfile=catalog_file, extdir=catalog_extdir)\n else:\n table = Table.read(catalog_file)\n return catalog.Catalog(table, extdir=catalog_extdir)", "response": "Build a fermipy. catalog. 
Catalog object from a FITS file with catalog tables and extended source templates."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef make_fermipy_roi_model_from_catalogs(cataloglist):\n data = dict(catalogs=cataloglist,\n src_roiwidth=360.)\n return roi_model.ROIModel(data, skydir=SkyCoord(0.0, 0.0, unit='deg'))", "response": "Build and return a fermipy. roi_model. ROIModel object from a list of fermipy. catalog. Catalog objects\n "} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nbuilds and return a fermipy. roi_model. ROIModel object from a dict with information about the sources", "response": "def make_roi(cls, sources=None):\n \"\"\"Build and return a `fermipy.roi_model.ROIModel` object from\n a dict with information about the sources\n \"\"\"\n if sources is None:\n sources = {}\n src_fact = cls()\n src_fact.add_sources(sources)\n ret_model = roi_model.ROIModel(\n {}, skydir=SkyCoord(0.0, 0.0, unit='deg'))\n for source in src_fact.sources.values():\n ret_model.load_source(source,\n build_index=False, merge_sources=False)\n return ret_model"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nbuilds and return a Fermipy. roi_model. 
ROIModel object by copying selected sources from another such object .", "response": "def copy_selected_sources(cls, roi, source_names):\n \"\"\"Build and return a `fermipy.roi_model.ROIModel` object\n by copying selected sources from another such object\n \"\"\"\n roi_new = cls.make_roi()\n for source_name in source_names:\n try:\n src_cp = roi.copy_source(source_name)\n except Exception:\n continue\n roi_new.load_source(src_cp, build_index=False)\n return roi_new"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef build_from_yamlfile(yamlfile):\n d = yaml.load(open(yamlfile))\n return MktimeFilterDict(d['aliases'], d['selections'])", "response": "Build a list of components from a yaml file"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncollecting all the jobs in a list of directories.", "response": "def collect_jobs(dirs, runscript, overwrite=False, max_job_age=90):\n \"\"\"Construct a list of job dictionaries.\"\"\"\n\n jobs = []\n\n for dirname in sorted(dirs):\n\n o = dict(cfgfile=os.path.join(dirname, 'config.yaml'),\n logfile=os.path.join(\n dirname, os.path.splitext(runscript)[0] + '.log'),\n runscript=os.path.join(dirname, runscript))\n\n if not os.path.isfile(o['cfgfile']):\n continue\n\n if not os.path.isfile(o['runscript']):\n continue\n\n if not os.path.isfile(o['logfile']):\n jobs.append(o)\n continue\n\n age = file_age_in_seconds(o['logfile']) / 60.\n job_status = check_log(o['logfile'])\n\n print(dirname, job_status, age)\n\n if job_status is False or overwrite:\n jobs.append(o)\n elif job_status == 'Exited':\n print(\"Job Exited. Resending command.\")\n jobs.append(o)\n elif job_status == 'None' and age > max_job_age:\n print(\n \"Job did not exit, but no activity on log file for > %.2f min. Resending command.\" % max_job_age)\n jobs.append(o)\n # elif job_status is True:\n # print(\"Job Completed. 
Resending command.\")\n # jobs.append(o)\n\n return jobs"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncomputes the source map for a given model.", "response": "def make_srcmap_old(psf, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,\n cdelt=0.01, rebin=1, psf_scale_fn=None):\n \"\"\"Compute the source map for a given spatial model.\n\n Parameters\n ----------\n psf : `~fermipy.irfs.PSFModel`\n spatial_model : str\n Spatial model.\n sigma : float\n Spatial size parameter for extended models.\n xpix : float\n Source position in pixel coordinates in X dimension.\n ypix : float\n Source position in pixel coordinates in Y dimension.\n rebin : int \n Factor by which the original map will be oversampled in the\n spatial dimension when computing the model.\n psf_scale_fn : callable \n Function that evaluates the PSF scaling function.\n Argument is energy in MeV.\n \"\"\"\n if rebin > 1:\n npix = npix * rebin\n xpix = xpix * rebin + (rebin - 1.0) / 2.\n ypix = ypix * rebin + (rebin - 1.0) / 2.\n cdelt = cdelt / rebin\n\n if spatial_model == 'RadialGaussian':\n k = utils.make_cgauss_kernel(psf, sigma, npix, cdelt,\n xpix, ypix, psf_scale_fn)\n elif spatial_model == 'RadialDisk':\n k = utils.make_cdisk_kernel(psf, sigma, npix, cdelt,\n xpix, ypix, psf_scale_fn)\n elif spatial_model == 'PointSource':\n k = utils.make_psf_kernel(psf, npix, cdelt,\n xpix, ypix, psf_scale_fn)\n else:\n raise Exception('Unsupported spatial model: %s', spatial_model)\n\n if rebin > 1:\n k = utils.sum_bins(k, 1, rebin)\n k = utils.sum_bins(k, 2, rebin)\n\n k *= psf.exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2\n return k"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef make_srcmap(psf, exp, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,\n cdelt=0.01, psf_scale_fn=None, klims=None, sparse=False):\n \"\"\"Compute the source map for a given spatial model.\n\n 
Parameters\n ----------\n psf : `~fermipy.irfs.PSFModel`\n\n exp : `~numpy.ndarray`\n Array of exposures.\n\n spatial_model : str\n Spatial model.\n\n sigma : float\n Spatial size parameter for extended models.\n\n xpix : float\n Source position in pixel coordinates in X dimension.\n\n ypix : float\n Source position in pixel coordinates in Y dimension.\n\n psf_scale_fn : callable \n Function that evaluates the PSF scaling function.\n Argument is energy in MeV.\n\n klims : tuple\n Indices of lower and upper range of energy.\n\n sparse : bool \n Skip pixels in which the source amplitude is small.\n\n \"\"\"\n if spatial_model == 'RadialGaussian':\n k = utils.make_radial_kernel(psf, utils.convolve2d_gauss,\n sigma / 1.5095921854516636, npix, cdelt,\n xpix, ypix, psf_scale_fn, klims=klims,\n sparse=sparse)\n elif spatial_model == 'RadialDisk':\n k = utils.make_radial_kernel(psf, utils.convolve2d_disk,\n sigma / 0.8246211251235321, npix, cdelt,\n xpix, ypix, psf_scale_fn, klims=klims,\n sparse=sparse)\n elif spatial_model == 'PointSource':\n k = utils.make_radial_kernel(psf, None, None, npix, cdelt,\n xpix, ypix, psf_scale_fn, klims=klims,\n sparse=sparse)\n else:\n raise Exception('Unsupported spatial model: %s', spatial_model)\n\n if klims is not None:\n exp = exp[klims[0]:klims[1] + 1, ...]\n\n k *= exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2\n return k", "response": "Compute the source map for a given PSF model."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ndeleting a map from a binned analysis source map file if it exists.", "response": "def delete_source_map(srcmap_file, names, logger=None):\n \"\"\"Delete a map from a binned analysis source map file if it exists.\n\n Parameters\n ----------\n srcmap_file : str\n Path to the source map file.\n\n names : list\n List of HDU keys of source maps to be deleted.\n\n \"\"\"\n with fits.open(srcmap_file) as hdulist:\n hdunames = [hdu.name.upper() for hdu in hdulist]\n\n if not 
isinstance(names, list):\n names = [names]\n\n for name in names:\n if not name.upper() in hdunames:\n continue\n del hdulist[name.upper()]\n\n hdulist.writeto(srcmap_file, overwrite=True)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_offsets(self, pix):\n\n idx = []\n for i in range(self.ndim):\n\n if i == 0:\n idx += [0]\n else:\n npix1 = int(self.shape[i])\n pix0 = int(pix[i - 1]) - npix1 // 2\n idx += [pix0]\n\n return idx", "response": "Get the offsets of the first pixel in each dimension in the the\n global coordinate system."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates a new map that is shifted to the pixel coordinates pix.", "response": "def shift_to_coords(self, pix, fill_value=np.nan):\n \"\"\"Create a new map that is shifted to the pixel coordinates\n ``pix``.\"\"\"\n\n pix_offset = self.get_offsets(pix)\n dpix = np.zeros(len(self.shape) - 1)\n for i in range(len(self.shape) - 1):\n x = self.rebin * (pix[i] - pix_offset[i + 1]\n ) + (self.rebin - 1.0) / 2.\n dpix[i] = x - self._pix_ref[i]\n\n pos = [pix_offset[i] + self.shape[i] // 2\n for i in range(self.data.ndim)]\n s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos)\n\n k = np.zeros(self.data.shape)\n for i in range(k.shape[0]):\n k[i] = shift(self._data_spline[i], dpix, cval=np.nan,\n order=2, prefilter=False)\n\n for i in range(1, len(self.shape)):\n k = utils.sum_bins(k, i, self.rebin)\n\n k0 = np.ones(self.shape_out) * fill_value\n\n if k[s1].size == 0 or k0[s0].size == 0:\n return k0\n k0[s0] = k[s1]\n return k0"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\ncreating a new map with reference pixel coordinates shifted to the pixel coordinates pix.", "response": "def create_map(self, pix):\n \"\"\"Create a new map with reference pixel coordinates shifted\n to the pixel coordinates ``pix``.\n\n Parameters\n ----------\n 
pix : `~numpy.ndarray`\n Reference pixel of new map.\n\n Returns\n -------\n out_map : `~numpy.ndarray`\n The shifted map. \n \"\"\"\n k0 = self._m0.shift_to_coords(pix)\n k1 = self._m1.shift_to_coords(pix)\n\n k0[np.isfinite(k1)] = k1[np.isfinite(k1)]\n k0[~np.isfinite(k0)] = 0\n return k0"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nconverting git release tag into a form that is PEP440 compliant.", "response": "def render_pep440(vcs):\n \"\"\"Convert git release tag into a form that is PEP440 compliant.\"\"\"\n\n if vcs is None:\n return None\n\n tags = vcs.split('-')\n\n # Bare version number\n if len(tags) == 1:\n return tags[0]\n else:\n return tags[0] + '+' + '.'.join(tags[1:])"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreading the release version from _version. py.", "response": "def read_release_version():\n \"\"\"Read the release version from ``_version.py``.\"\"\"\n import re\n dirname = os.path.abspath(os.path.dirname(__file__))\n\n try:\n f = open(os.path.join(dirname, \"_version.py\"), \"rt\")\n for line in f.readlines():\n\n m = re.match(\"__version__ = '([^']+)'\", line)\n if m:\n ver = m.group(1)\n return ver\n\n except:\n return None\n\n return None"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nwrite the release version to _version. 
py.", "response": "def write_release_version(version):\n \"\"\"Write the release version to ``_version.py``.\"\"\"\n dirname = os.path.abspath(os.path.dirname(__file__))\n f = open(os.path.join(dirname, \"_version.py\"), \"wt\")\n f.write(\"__version__ = '%s'\\n\" % version)\n f.close()"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nmaking a full file path by combining tokens and origname.", "response": "def make_full_path(basedir, outkey, origname):\n \"\"\"Make a full file path by combining tokens\n\n Parameters\n -----------\n\n basedir : str\n The top level output area\n\n outkey : str\n The key for the particular instance of the analysis\n\n origname : str\n Template for the output file name\n\n Returns\n -------\n\n outpath : str\n This will be ::.fits \n Where newname = origname.replace('.fits', '_.fits')\n\n \"\"\"\n return os.path.join(basedir, outkey,\n os.path.basename(origname).replace('.fits',\n '_%s.fits' % outkey))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n comp_file = args.get('comp', None)\n datafile = args.get('data', None)\n if is_null(comp_file):\n return\n if is_null(datafile):\n return\n\n NAME_FACTORY.update_base_dict(args['data'])\n\n outdir = args.get('outdir')\n outkey = args.get('outkey')\n ft1file = args['ft1file']\n ft2file = args['ft2file']\n if is_null(outdir) or is_null(outkey):\n return\n pfiles = os.path.join(outdir, outkey)\n\n self.comp_dict = yaml.safe_load(open(comp_file))\n coordsys = self.comp_dict.pop('coordsys')\n\n full_out_dir = make_nfs_path(os.path.join(outdir, outkey))\n\n for key_e, comp_e in sorted(self.comp_dict.items()):\n emin = math.pow(10., comp_e['log_emin'])\n emax = math.pow(10., comp_e['log_emax'])\n enumbins = 
comp_e['enumbins']\n zmax = comp_e['zmax']\n zcut = \"zmax%i\" % comp_e['zmax']\n evclassstr = NAME_FACTORY.base_dict['evclass']\n\n kwargs_select = dict(zcut=zcut,\n ebin=key_e,\n psftype='ALL',\n coordsys=coordsys)\n linkname = 'select-energy-%s-%s' % (key_e, zcut)\n selectfile_energy = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_select))\n self._set_link(linkname, Gtlink_select,\n infile=ft1file,\n outfile=selectfile_energy,\n zmax=zmax,\n emin=emin,\n emax=emax,\n evclass=NAME_FACTORY.evclassmask(evclassstr),\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname))\n\n if 'mktimefilters' in comp_e:\n mktimefilters = comp_e['mktimefilters']\n else:\n mktimefilters = ['none']\n\n for mktimekey in mktimefilters:\n kwargs_mktime = kwargs_select.copy()\n kwargs_mktime['mktime'] = mktimekey\n filterstring = MKTIME_DICT[mktimekey]\n mktime_file = make_full_path(outdir, outkey, NAME_FACTORY.mktime(**kwargs_mktime))\n ltcube_file = make_full_path(outdir, outkey, NAME_FACTORY.ltcube(**kwargs_mktime))\n linkname_mktime = 'mktime-%s-%s-%s' % (key_e, zcut, mktimekey)\n linkname_ltcube = 'ltcube-%s-%s-%s' % (key_e, zcut, mktimekey)\n\n self._set_link(linkname_mktime, Gtlink_mktime,\n evfile=selectfile_energy,\n outfile=mktime_file,\n scfile=ft2file,\n filter=filterstring,\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname_mktime))\n self._set_link(linkname_ltcube, Gtlink_ltcube,\n evfile=mktime_file,\n outfile=ltcube_file,\n scfile=ft2file,\n zmax=zmax,\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname_ltcube))\n\n if 'evtclasses' in comp_e:\n evtclasslist_vals = comp_e['evtclasses']\n else:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n\n for evtclassval in evtclasslist_vals:\n for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):\n linkname_select = 'select-type-%s-%s-%s-%s-%s' % (\n key_e, zcut, mktimekey, evtclassval, psf_type)\n linkname_bin = 
'bin-%s-%s-%s-%s-%s' % (key_e,\n zcut, mktimekey,\n evtclassval, psf_type)\n kwargs_bin = kwargs_mktime.copy()\n kwargs_bin['psftype'] = psf_type\n kwargs_bin['coordsys'] = coordsys\n kwargs_bin['evclass'] = evtclassval\n selectfile_psf = make_full_path(\n outdir, outkey, NAME_FACTORY.select(**kwargs_bin))\n binfile_psf = make_full_path(\n outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))\n hpx_order_psf = min(args['hpx_order_max'], psf_dict['hpx_order'])\n linkname_select = 'select-type-%s-%s-%s-%s-%s' % (key_e, zcut,\n mktimekey, evtclassval,\n psf_type)\n linkname_bin = 'bin-%s-%s-%s-%s-%s' % (key_e, zcut, mktimekey,\n evtclassval, psf_type)\n\n self._set_link(linkname_select, Gtlink_select,\n infile=selectfile_energy,\n outfile=selectfile_psf,\n zmax=zmax,\n emin=emin,\n emax=emax,\n evtype=EVT_TYPE_DICT[psf_type],\n evclass=NAME_FACTORY.evclassmask(evtclassval),\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname_select))\n self._set_link(linkname_bin, Gtlink_bin,\n coordsys=coordsys,\n hpx_order=hpx_order_psf,\n evfile=selectfile_psf,\n outfile=binfile_psf,\n emin=emin,\n emax=emax,\n enumbins=enumbins,\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname_bin))"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef build_job_configs(self, args):\n job_configs = {}\n\n comp_file = args.get('comp', None)\n if comp_file is not None:\n comp_dict = yaml.safe_load(open(comp_file))\n coordsys = comp_dict.pop('coordsys')\n for v in comp_dict.values():\n v['coordsys'] = coordsys\n else:\n return job_configs\n\n datafile = args['data']\n if datafile is None or datafile == 'None':\n return job_configs\n NAME_FACTORY.update_base_dict(args['data'])\n\n inputfiles = create_inputlist(args['ft1file'])\n outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')\n data_ver = NAME_FACTORY.base_dict['data_ver']\n\n for idx, infile in enumerate(inputfiles):\n key = 
\"%06i\" % idx\n key_scfile = \"%03i\" % (idx + 1)\n output_dir = os.path.join(outdir_base, key)\n try:\n os.mkdir(output_dir)\n except OSError:\n pass\n scfile = args['ft2file'].replace('.lst', '_%s.fits' % key_scfile)\n logfile = make_nfs_path(os.path.join(output_dir,\n 'scatter_mk_%s_%s.log' % (data_ver, key)))\n\n job_configs[key] = comp_dict.copy()\n job_configs[key].update(dict(ft1file=infile,\n scfile=scfile,\n comp=args['comp'],\n hpx_order_max=args['hpx_order_max'],\n outdir=outdir_base,\n outkey=key,\n logfile=logfile,\n pfiles=output_dir))\n\n return job_configs", "response": "Hook to build job configurations based on input arguments"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nmapping from the top - level arguments to the top - level arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n data = args.get('data')\n comp = args.get('comp')\n ft1file = args.get('ft1file')\n ft2file = args.get('ft2file')\n scratch = args.get('scratch', None)\n dry_run = args.get('dry_run', None)\n\n self._set_link('split-and-mktime', SplitAndMktime_SG,\n comp=comp, data=data,\n hpx_order_max=args.get('hpx_order_ccube', 9),\n ft1file=ft1file,\n ft2file=ft2file,\n do_ltsum=args.get('do_ltsum', False),\n scratch=scratch,\n dry_run=dry_run)\n\n self._set_link('coadd-split', CoaddSplit_SG,\n comp=comp, data=data,\n ft1file=ft1file)\n\n self._set_link('ltsum', Gtltsum_SG,\n comp=comp, data=data,\n ft1file=args['ft1file'],\n dry_run=dry_run)\n\n self._set_link('expcube2', Gtexpcube2_SG,\n comp=comp, data=data,\n hpx_order_max=args.get('hpx_order_expcube', 5),\n dry_run=dry_run)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nload python data structure from either a YAML or numpy file.", "response": "def load_data(infile, workdir=None):\n \"\"\"Load python data structure from either 
a YAML or numpy file. \"\"\"\n infile = resolve_path(infile, workdir=workdir)\n infile, ext = os.path.splitext(infile)\n\n if os.path.isfile(infile + '.npy'):\n infile += '.npy'\n elif os.path.isfile(infile + '.yaml'):\n infile += '.yaml'\n else:\n raise Exception('Input file does not exist.')\n\n ext = os.path.splitext(infile)[1]\n\n if ext == '.npy':\n return infile, load_npy(infile)\n elif ext == '.yaml':\n return infile, load_yaml(infile)\n else:\n raise Exception('Unrecognized extension.')"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nresolve the path of each file name in the file pathlist and write the updated paths to a new file.", "response": "def resolve_file_path_list(pathlist, workdir, prefix='',\n randomize=False):\n \"\"\"Resolve the path of each file name in the file ``pathlist`` and\n write the updated paths to a new file.\n \"\"\"\n files = []\n with open(pathlist, 'r') as f:\n files = [line.strip() for line in f]\n\n newfiles = []\n for f in files:\n f = os.path.expandvars(f)\n if os.path.isfile(f):\n newfiles += [f]\n else:\n newfiles += [os.path.join(workdir, f)]\n\n if randomize:\n _, tmppath = tempfile.mkstemp(prefix=prefix, dir=workdir)\n else:\n tmppath = os.path.join(workdir, prefix)\n\n tmppath += '.txt'\n\n with open(tmppath, 'w') as tmpfile:\n tmpfile.write(\"\\n\".join(newfiles))\n return tmppath"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef match_regex_list(patterns, string):\n\n for p in patterns:\n\n if re.findall(p, string):\n return True\n\n return False", "response": "Perform a regex match of a string against a list of patterns. 
Returns True if the string matches at least one pattern in the list otherwise False."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef find_rows_by_string(tab, names, colnames=['assoc']):\n mask = np.empty(len(tab), dtype=bool)\n mask.fill(False)\n names = [name.lower().replace(' ', '') for name in names]\n\n for colname in colnames:\n\n if colname not in tab.columns:\n continue\n\n col = tab[[colname]].copy()\n col[colname] = defchararray.replace(defchararray.lower(col[colname]).astype(str),\n ' ', '')\n for name in names:\n mask |= col[colname] == name\n return mask", "response": "Find the rows in a table that match at least one of the the\n strings in names."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nevaluates the cosine of the angular separation between two dynamical components.", "response": "def separation_cos_angle(lon0, lat0, lon1, lat1):\n \"\"\"Evaluate the cosine of the angular separation between two\n direction vectors.\"\"\"\n return (np.sin(lat1) * np.sin(lat0) + np.cos(lat1) * np.cos(lat0) *\n np.cos(lon1 - lon0))"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nconverting spherical coordinates to cartesian unit vectors.", "response": "def angle_to_cartesian(lon, lat):\n \"\"\"Convert spherical coordinates to cartesian unit vectors.\"\"\"\n theta = np.array(np.pi / 2. 
- lat)\n return np.vstack((np.sin(theta) * np.cos(lon),\n np.sin(theta) * np.sin(lon),\n np.cos(theta))).T"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_model_name(src):\n o = ''\n spatial_type = src['SpatialModel'].lower()\n o += spatial_type\n\n if spatial_type == 'gaussian':\n o += '_s%04.2f' % src['SpatialWidth']\n\n if src['SpectrumType'] == 'PowerLaw':\n o += '_powerlaw_%04.2f' % float(src.spectral_pars['Index']['value'])\n else:\n o += '_%s' % (src['SpectrumType'].lower())\n\n return o", "response": "Generate a name for a source object given its spatial and spectral\n properties."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef cov_to_correlation(cov):\n err = np.sqrt(np.diag(cov))\n errinv = np.ones_like(err) * np.nan\n m = np.isfinite(err) & (err != 0)\n errinv[m] = 1. / err[m]\n corr = np.array(cov)\n return corr * np.outer(errinv, errinv)", "response": "Compute the correlation matrix given the covariance matrix."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncomputing the covariance matrix x and y given the standard deviation along the semi - major axes and the rotation angle of the error ellipse.", "response": "def ellipse_to_cov(sigma_maj, sigma_min, theta):\n \"\"\"Compute the covariance matrix in two variables x and y given\n the std. deviation along the semi-major and semi-minor axes and\n the rotation angle of the error ellipse.\n\n Parameters\n ----------\n sigma_maj : float\n Std. deviation along major axis of error ellipse.\n\n sigma_min : float\n Std. 
deviation along minor axis of error ellipse.\n\n theta : float\n Rotation angle in radians from x-axis to ellipse major axis.\n \"\"\"\n cth = np.cos(theta)\n sth = np.sin(theta)\n covxx = cth**2 * sigma_maj**2 + sth**2 * sigma_min**2\n covyy = sth**2 * sigma_maj**2 + cth**2 * sigma_min**2\n covxy = cth * sth * sigma_maj**2 - cth * sth * sigma_min**2\n return np.array([[covxx, covxy], [covxy, covyy]])"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef onesided_cl_to_dlnl(cl):\n alpha = 1.0 - cl\n return 0.5 * np.power(np.sqrt(2.) * special.erfinv(1 - 2 * alpha), 2.)", "response": "Compute the delta - loglikelihood value that corresponds to an animal s upper limit of the given confidence level."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfinds the root of a function in the interval encompassed by x0 and xb.", "response": "def find_function_root(fn, x0, xb, delta=0.0, bounds=None):\n \"\"\"Find the root of a function: f(x)+delta in the interval encompassed\n by x0 and xb.\n\n Parameters\n ----------\n\n fn : function\n Python function.\n\n x0 : float\n Fixed bound for the root search. This will either be used as\n the lower or upper bound depending on the relative value of xb.\n\n xb : float\n Upper or lower bound for the root search. 
If a root is not\n found in the interval [x0,xb]/[xb,x0] this value will be\n increased/decreased until a change in sign is found.\n\n \"\"\"\n\n if x0 == xb:\n return np.nan\n\n for i in range(10):\n if np.sign(fn(xb) + delta) != np.sign(fn(x0) + delta):\n break\n if bounds is not None and (xb < bounds[0] or xb > bounds[1]):\n break\n if xb < x0:\n xb *= 0.5\n else:\n xb *= 2.0\n\n # Failed to find a root\n if np.sign(fn(xb) + delta) == np.sign(fn(x0) + delta):\n return np.nan\n\n if x0 == 0:\n xtol = 1e-10 * np.abs(xb)\n else:\n xtol = 1e-10 * np.abs(xb + x0)\n\n return brentq(lambda t: fn(t) + delta, x0, xb, xtol=xtol)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncomputes upper and lower limits peak position and 1 - sigma errors from a 1 - D likelihood function.", "response": "def get_parameter_limits(xval, loglike, cl_limit=0.95, cl_err=0.68269, tol=1E-2,\n bounds=None):\n \"\"\"Compute upper/lower limits, peak position, and 1-sigma errors\n from a 1-D likelihood function. This function uses the\n delta-loglikelihood method to evaluate parameter limits by\n searching for the point at which the change in the log-likelihood\n value with respect to the maximum equals a specific value. A\n cubic spline fit to the log-likelihood values is used to\n improve the accuracy of the calculation.\n\n Parameters\n ----------\n\n xval : `~numpy.ndarray`\n Array of parameter values.\n\n loglike : `~numpy.ndarray`\n Array of log-likelihood values.\n\n cl_limit : float\n Confidence level to use for limit calculation.\n\n cl_err : float\n Confidence level to use for two-sided confidence interval\n calculation.\n\n tol : float\n Absolute precision of likelihood values.\n\n Returns\n -------\n\n x0 : float\n Coordinate at maximum of likelihood function.\n\n err_lo : float \n Lower error for two-sided confidence interval with CL\n ``cl_err``. 
Corresponds to point (x < x0) at which the\n log-likelihood falls by a given value with respect to the\n maximum (0.5 for 1 sigma). Set to nan if the change in the\n log-likelihood function at the lower bound of the ``xval``\n input array is less than than the value for the given CL.\n\n err_hi : float\n Upper error for two-sided confidence interval with CL\n ``cl_err``. Corresponds to point (x > x0) at which the\n log-likelihood falls by a given value with respect to the\n maximum (0.5 for 1 sigma). Set to nan if the change in the\n log-likelihood function at the upper bound of the ``xval``\n input array is less than the value for the given CL.\n\n err : float\n Symmetric 1-sigma error. Average of ``err_lo`` and ``err_hi``\n if both are defined.\n\n ll : float\n Lower limit evaluated at confidence level ``cl_limit``.\n\n ul : float\n Upper limit evaluated at confidence level ``cl_limit``.\n\n lnlmax : float\n Log-likelihood value at ``x0``.\n\n \"\"\"\n\n dlnl_limit = onesided_cl_to_dlnl(cl_limit)\n dlnl_err = twosided_cl_to_dlnl(cl_err)\n\n try:\n # Pad the likelihood function\n # if len(xval) >= 3 and np.max(loglike) - loglike[-1] < 1.5*dlnl_limit:\n # p = np.polyfit(xval[-3:], loglike[-3:], 2)\n # x = np.linspace(xval[-1], 10 * xval[-1], 3)[1:]\n # y = np.polyval(p, x)\n # x = np.concatenate((xval, x))\n # y = np.concatenate((loglike, y))\n # else:\n x, y = xval, loglike\n spline = UnivariateSpline(x, y, k=2,\n #k=min(len(xval) - 1, 3),\n w=(1 / tol) * np.ones(len(x)))\n except:\n print(\"Failed to create spline: \", xval, loglike)\n return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,\n 'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,\n 'lnlmax': np.nan}\n\n sd = spline.derivative()\n\n imax = np.argmax(loglike)\n ilo = max(imax - 1, 0)\n ihi = min(imax + 1, len(xval) - 1)\n\n # Find the peak\n x0 = xval[imax]\n\n # Refine the peak position\n if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):\n x0 = find_function_root(sd, xval[ilo], xval[ihi])\n\n lnlmax = 
float(spline(x0))\n\n def fn(t): return spline(t) - lnlmax\n fn_val = fn(xval)\n if np.any(fn_val[imax:] < -dlnl_limit):\n xhi = xval[imax:][fn_val[imax:] < -dlnl_limit][0]\n else:\n xhi = xval[-1] \n # EAC: brute force check that xhi is greater than x0\n # The fabs is here in case x0 is negative\n if xhi <= x0:\n xhi = x0 + np.fabs(x0)\n\n if np.any(fn_val[:imax] < -dlnl_limit):\n xlo = xval[:imax][fn_val[:imax] < -dlnl_limit][-1]\n else:\n xlo = xval[0]\n # EAC: brute force check that xlo is less than x0\n # The fabs is here in case x0 is negative \n if xlo >= x0:\n xlo = x0 - 0.5*np.fabs(x0)\n\n ul = find_function_root(fn, x0, xhi, dlnl_limit, bounds=bounds)\n ll = find_function_root(fn, x0, xlo, dlnl_limit, bounds=bounds)\n err_lo = np.abs(x0 - find_function_root(fn, x0, xlo, dlnl_err,\n bounds=bounds))\n err_hi = np.abs(x0 - find_function_root(fn, x0, xhi, dlnl_err,\n bounds=bounds))\n\n err = np.nan\n if np.isfinite(err_lo) and np.isfinite(err_hi):\n err = 0.5 * (err_lo + err_hi)\n elif np.isfinite(err_hi):\n err = err_hi\n elif np.isfinite(err_lo):\n err = err_lo\n\n o = {'x0': x0, 'ul': ul, 'll': ll,\n 'err_lo': err_lo, 'err_hi': err_hi, 'err': err,\n 'lnlmax': lnlmax}\n return o"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nevaluating a 2D parabola given by the input tuple xy.", "response": "def parabola(xy, amplitude, x0, y0, sx, sy, theta):\n \"\"\"Evaluate a 2D parabola given by:\n\n f(x,y) = f_0 - (1/2) * \\delta^T * R * \\Sigma * R^T * \\delta\n\n where\n\n \\delta = [(x - x_0), (y - y_0)]\n\n and R is the matrix for a 2D rotation by angle \\theta and \\Sigma\n is the covariance matrix:\n\n \\Sigma = [[1/\\sigma_x^2, 0 ],\n [0 , 1/\\sigma_y^2]] \n\n Parameters\n ----------\n xy : tuple \n Tuple containing x and y arrays for the values at which the\n parabola will be evaluated.\n\n amplitude : float\n Constant offset value.\n\n x0 : float\n Centroid in x coordinate.\n\n y0 : float\n Centroid in y coordinate.\n\n sx : 
float\n Standard deviation along first axis (x-axis when theta=0).\n\n sy : float\n Standard deviation along second axis (y-axis when theta=0).\n\n theta : float\n Rotation angle in radians.\n\n Returns\n -------\n vals : `~numpy.ndarray` \n Values of the parabola evaluated at the points defined in the\n `xy` input tuple.\n\n \"\"\"\n\n x = xy[0]\n y = xy[1]\n\n cth = np.cos(theta)\n sth = np.sin(theta)\n a = (cth ** 2) / (2 * sx ** 2) + (sth ** 2) / (2 * sy ** 2)\n b = -(np.sin(2 * theta)) / (4 * sx ** 2) + (np.sin(2 * theta)) / (\n 4 * sy ** 2)\n c = (sth ** 2) / (2 * sx ** 2) + (cth ** 2) / (2 * sy ** 2)\n vals = amplitude - (a * ((x - x0) ** 2) +\n 2 * b * (x - x0) * (y - y0) +\n c * ((y - y0) ** 2))\n\n return vals"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets mask of connected region within delta of max ( z ).", "response": "def get_region_mask(z, delta, xy=None):\n \"\"\"Get mask of connected region within delta of max(z).\"\"\"\n\n if xy is None:\n ix, iy = np.unravel_index(np.argmax(z), z.shape)\n else:\n ix, iy = xy\n\n mz = (z > z[ix, iy] - delta)\n labels = label(mz)[0]\n mz &= labels == labels[ix, iy]\n return mz"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nfits a parabola to a 2D numpy array.", "response": "def fit_parabola(z, ix, iy, dpix=3, zmin=None):\n \"\"\"Fit a parabola to a 2D numpy array. 
This function will fit a\n parabola with the functional form described in\n `~fermipy.utils.parabola` to a 2D slice of the input array `z`.\n The fit region encompasses pixels that are within `dpix` of the\n pixel coordinate (iz,iy) OR that have a value relative to the peak\n value greater than `zmin`.\n\n Parameters\n ----------\n z : `~numpy.ndarray`\n\n ix : int\n X index of center pixel of fit region in array `z`.\n\n iy : int\n Y index of center pixel of fit region in array `z`.\n\n dpix : int\n Max distance from center pixel of fit region.\n\n zmin : float\n\n \"\"\"\n offset = make_pixel_distance(z.shape, iy, ix)\n x, y = np.meshgrid(np.arange(z.shape[0]), np.arange(z.shape[1]),\n indexing='ij')\n\n m = (offset <= dpix)\n if np.sum(m) < 9:\n m = (offset <= dpix + 0.5)\n\n if zmin is not None:\n m |= get_region_mask(z, np.abs(zmin), (ix, iy))\n\n sx = get_bounded_slice(ix, dpix, z.shape[0])\n sy = get_bounded_slice(iy, dpix, z.shape[1])\n\n coeffx = poly_to_parabola(np.polyfit(x[sx, iy], z[sx, iy], 2))\n coeffy = poly_to_parabola(np.polyfit(y[ix, sy], z[ix, sy], 2))\n #p0 = [coeffx[2], coeffx[0], coeffy[0], coeffx[1], coeffy[1], 0.0]\n p0 = [coeffx[2], float(ix), float(iy), coeffx[1], coeffy[1], 0.0]\n\n o = {'fit_success': True, 'p0': p0}\n\n def curve_fit_fn(*args):\n return np.ravel(parabola(*args))\n\n try:\n bounds = (-np.inf * np.ones(6), np.inf * np.ones(6))\n bounds[0][1] = -0.5\n bounds[0][2] = -0.5\n bounds[1][1] = z.shape[0] - 0.5\n bounds[1][2] = z.shape[1] - 0.5\n popt, pcov = scipy.optimize.curve_fit(curve_fit_fn,\n (np.ravel(x[m]), np.ravel(y[m])),\n np.ravel(z[m]), p0, bounds=bounds)\n except Exception:\n popt = copy.deepcopy(p0)\n o['fit_success'] = False\n\n fm = parabola((x[m], y[m]), *popt)\n df = fm - z[m]\n rchi2 = np.sum(df ** 2) / len(fm)\n\n o['rchi2'] = rchi2\n o['x0'] = popt[1]\n o['y0'] = popt[2]\n o['sigmax'] = np.abs(popt[3])\n o['sigmay'] = np.abs(popt[4])\n o['sigma'] = np.sqrt(o['sigmax'] ** 2 + o['sigmay'] ** 2)\n o['z0'] = 
popt[0]\n o['theta'] = popt[5]\n o['popt'] = popt\n o['mask'] = m\n\n a = max(o['sigmax'], o['sigmay'])\n b = min(o['sigmax'], o['sigmay'])\n\n o['eccentricity'] = np.sqrt(1 - b ** 2 / a ** 2)\n o['eccentricity2'] = np.sqrt(a ** 2 / b ** 2 - 1)\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef split_bin_edges(edges, npts=2):\n if npts < 2:\n return edges\n\n x = (edges[:-1, None] +\n (edges[1:, None] - edges[:-1, None]) *\n np.linspace(0.0, 1.0, npts + 1)[None, :])\n return np.unique(np.ravel(x))", "response": "Subdivide an array of bins by splitting each bin into npts subintervals."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nconvert axis coordinate to bin index.", "response": "def val_to_bin(edges, x):\n \"\"\"Convert axis coordinate to bin index.\"\"\"\n ibin = np.digitize(np.array(x, ndmin=1), edges) - 1\n return ibin"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nconverting axis coordinate to bin index.", "response": "def val_to_edge(edges, x):\n \"\"\"Convert axis coordinate to bin index.\"\"\"\n edges = np.array(edges)\n w = edges[1:] - edges[:-1]\n w = np.insert(w, 0, w[0])\n ibin = np.digitize(np.array(x, ndmin=1), edges - 0.5 * w) - 1\n ibin[ibin < 0] = 0\n return ibin"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef val_to_bin_bounded(edges, x):\n nbins = len(edges) - 1\n ibin = val_to_bin(edges, x)\n ibin[ibin < 0] = 0\n ibin[ibin > nbins - 1] = nbins - 1\n return ibin", "response": "Convert axis coordinate to bin index."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef extend_array(edges, binsz, lo, hi):\n\n numlo = int(np.ceil((edges[0] - lo) / binsz))\n numhi = int(np.ceil((hi - edges[-1]) / binsz))\n\n edges = copy.deepcopy(edges)\n if numlo > 0:\n edges_lo = np.linspace(edges[0] - 
numlo * binsz, edges[0], numlo + 1)\n edges = np.concatenate((edges_lo[:-1], edges))\n\n if numhi > 0:\n edges_hi = np.linspace(edges[-1], edges[-1] + numhi * binsz, numhi + 1)\n edges = np.concatenate((edges, edges_hi[1:]))\n\n return edges", "response": "Extend an array to encompass lo and hi values."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nconvert a FITS recarray to a python dictionary.", "response": "def fits_recarray_to_dict(table):\n \"\"\"Convert a FITS recarray to a python dictionary.\"\"\"\n\n cols = {}\n for icol, col in enumerate(table.columns.names):\n\n col_data = table.data[col]\n if type(col_data[0]) == np.float32:\n cols[col] = np.array(col_data, dtype=float)\n elif type(col_data[0]) == np.float64:\n cols[col] = np.array(col_data, dtype=float)\n elif type(col_data[0]) == str:\n cols[col] = np.array(col_data, dtype=str)\n elif type(col_data[0]) == np.string_:\n cols[col] = np.array(col_data, dtype=str)\n elif type(col_data[0]) == np.int16:\n cols[col] = np.array(col_data, dtype=int)\n elif type(col_data[0]) == np.ndarray:\n cols[col] = np.array(col_data)\n else:\n raise Exception(\n 'Unrecognized column type: %s %s' % (col, str(type(col_data))))\n\n return cols"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns a pretty - printed XML string for the Element.", "response": "def prettify_xml(elem):\n \"\"\"Return a pretty-printed XML string for the Element.\n \"\"\"\n from xml.dom import minidom\n import xml.etree.cElementTree as et\n\n rough_string = et.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef tolist(x):\n if isinstance(x, list):\n return map(tolist, x)\n elif isinstance(x, dict):\n return dict((tolist(k), tolist(v)) for k, v in x.items())\n elif isinstance(x, np.ndarray) or 
isinstance(x, np.number):\n # note, call tolist again to convert strings of numbers to numbers\n return tolist(x.tolist())\n elif isinstance(x, OrderedDict):\n return dict(x)\n elif isinstance(x, np.bool_):\n return bool(x)\n elif isstr(x) or isinstance(x, np.str):\n x = str(x) # convert unicode & numpy strings\n try:\n return int(x)\n except:\n try:\n return float(x)\n except:\n if x == 'True':\n return True\n elif x == 'False':\n return False\n else:\n return x\n else:\n return x", "response": "a convenience function that takes in a nested structure of lists and dictionaries and returns a list of base objects."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nevaluating the convolution f ( r ) = f ( r ) * g ( r ) where r is azimuthally symmetric function in two dimensions and g is a step function given by nstep.", "response": "def convolve2d_disk(fn, r, sig, nstep=200):\n \"\"\"Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is\n azimuthally symmetric function in two dimensions and g is a\n step function given by:\n\n g(r) = H(1-r/s)\n\n Parameters\n ----------\n\n fn : function\n Input function that takes a single radial coordinate parameter.\n\n r : `~numpy.ndarray`\n Array of points at which the convolution is to be evaluated.\n\n sig : float\n Radius parameter of the step function.\n\n nstep : int\n Number of sampling point for numeric integration.\n \"\"\"\n\n r = np.array(r, ndmin=1)\n sig = np.array(sig, ndmin=1)\n\n rmin = r - sig\n rmax = r + sig\n rmin[rmin < 0] = 0\n delta = (rmax - rmin) / nstep\n\n redge = rmin[..., np.newaxis] + \\\n delta[..., np.newaxis] * np.linspace(0, nstep, nstep + 1)\n rp = 0.5 * (redge[..., 1:] + redge[..., :-1])\n dr = redge[..., 1:] - redge[..., :-1]\n fnv = fn(rp)\n\n r = r.reshape(r.shape + (1,))\n cphi = -np.ones(dr.shape)\n m = ((rp + r) / sig < 1) | (r == 0)\n\n rrp = r * rp\n sx = r ** 2 + rp ** 2 - sig ** 2\n cphi[~m] = sx[~m] / (2 * rrp[~m])\n dphi = 2 * np.arccos(cphi)\n v = rp 
* fnv * dphi * dr / (np.pi * sig * sig)\n s = np.sum(v, axis=-1)\n\n return s"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef convolve2d_gauss(fn, r, sig, nstep=200):\n r = np.array(r, ndmin=1)\n sig = np.array(sig, ndmin=1)\n\n rmin = r - 10 * sig\n rmax = r + 10 * sig\n rmin[rmin < 0] = 0\n delta = (rmax - rmin) / nstep\n\n redge = (rmin[..., np.newaxis] +\n delta[..., np.newaxis] *\n np.linspace(0, nstep, nstep + 1))\n\n rp = 0.5 * (redge[..., 1:] + redge[..., :-1])\n dr = redge[..., 1:] - redge[..., :-1]\n fnv = fn(rp)\n\n r = r.reshape(r.shape + (1,))\n sig2 = sig * sig\n x = r * rp / (sig2)\n\n if 'je_fn' not in convolve2d_gauss.__dict__:\n t = 10 ** np.linspace(-8, 8, 1000)\n t = np.insert(t, 0, [0])\n je = special.ive(0, t)\n convolve2d_gauss.je_fn = UnivariateSpline(t, je, k=2, s=0)\n\n je = convolve2d_gauss.je_fn(x.flat).reshape(x.shape)\n #je2 = special.ive(0,x)\n v = (rp * fnv / (sig2) * je * np.exp(x - (r * r + rp * rp) /\n (2 * sig2)) * dr)\n s = np.sum(v, axis=-1)\n\n return s", "response": "Evaluate the convolution f ( r ) = f ( r ) * g ( r ) where r is azimuthally symmetric function in two dimensions and g is a gaussian with standard deviation s given by 2 * pi * s^2."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nfilling a 2D array with dimensions shape with the distance of each pixel from a reference direction xpix ypix in pixel coordinates.", "response": "def make_pixel_distance(shape, xpix=None, ypix=None):\n \"\"\"Fill a 2D array with dimensions `shape` with the distance of each\n pixel from a reference direction (xpix,ypix) in pixel coordinates.\n Pixel coordinates are defined such that (0,0) is located at the\n center of the corner pixel.\n\n \"\"\"\n if np.isscalar(shape):\n shape = [shape, shape]\n\n if xpix is None:\n xpix = (shape[1] - 1.0) / 2.\n\n if ypix is None:\n ypix = (shape[0] - 1.0) / 2.\n\n dx = np.linspace(0, shape[1] - 1, 
shape[1]) - xpix\n dy = np.linspace(0, shape[0] - 1, shape[0]) - ypix\n dxy = np.zeros(shape)\n dxy += np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)\n\n return dxy"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=None, ypix=None):\n\n sigma /= cdelt\n\n def fn(t, s): return 1. / (2 * np.pi * s ** 2) * np.exp(\n -t ** 2 / (s ** 2 * 2.0))\n dxy = make_pixel_distance(npix, xpix, ypix)\n k = fn(dxy, sigma)\n k /= (np.sum(k) * np.radians(cdelt) ** 2)\n\n return k", "response": "Make a 2D gaussian kernel."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nmakes kernel for a 2D disk.", "response": "def make_disk_kernel(radius, npix=501, cdelt=0.01, xpix=None, ypix=None):\n \"\"\"Make kernel for a 2D disk.\n\n Parameters\n ----------\n\n radius : float\n Disk radius in deg.\n \"\"\"\n\n radius /= cdelt\n\n def fn(t, s): return 0.5 * (np.sign(s - t) + 1.0)\n\n dxy = make_pixel_distance(npix, xpix, ypix)\n k = fn(dxy, radius)\n k /= (np.sum(k) * np.radians(cdelt) ** 2)\n\n return k"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nmaking a kernel for a PSF - convolved 2D disk.", "response": "def make_cdisk_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,\n normalize=False):\n \"\"\"Make a kernel for a PSF-convolved 2D disk.\n\n Parameters\n ----------\n\n psf : `~fermipy.irfs.PSFModel`\n\n sigma : float\n 68% containment radius in degrees.\n \"\"\"\n\n sigma /= 0.8246211251235321\n\n dtheta = psf.dtheta\n egy = psf.energies\n\n x = make_pixel_distance(npix, xpix, ypix)\n x *= cdelt\n\n k = np.zeros((len(egy), npix, npix))\n for i in range(len(egy)):\n def fn(t): return psf.eval(i, t, scale_fn=psf_scale_fn)\n psfc = convolve2d_disk(fn, dtheta, sigma)\n k[i] = np.interp(np.ravel(x), dtheta, psfc).reshape(x.shape)\n\n if normalize:\n k /= (np.sum(k, axis=0)[np.newaxis, ...] 
* np.radians(cdelt) ** 2)\n\n return k"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nmakes a kernel for a general radially symmetric 2D function. Parameters ---------- psf : `~fermipy.irfs.PSFModel` fn : callable Function that evaluates the kernel at a radial coordinate r. sigma : float 68% containment radius in degrees.", "response": "def make_radial_kernel(psf, fn, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,\n normalize=False, klims=None, sparse=False):\n \"\"\"Make a kernel for a general radially symmetric 2D function.\n\n Parameters\n ----------\n\n psf : `~fermipy.irfs.PSFModel`\n\n fn : callable\n Function that evaluates the kernel at a radial coordinate r.\n\n sigma : float\n 68% containment radius in degrees.\n \"\"\"\n\n if klims is None:\n egy = psf.energies\n else:\n egy = psf.energies[klims[0]:klims[1] + 1]\n ang_dist = make_pixel_distance(npix, xpix, ypix) * cdelt\n max_ang_dist = np.max(ang_dist) + cdelt\n #dtheta = np.linspace(0.0, (np.max(ang_dist) * 1.05)**0.5, 200)**2.0\n # z = create_kernel_function_lookup(psf, fn, sigma, egy,\n # dtheta, psf_scale_fn)\n\n shape = (len(egy), npix, npix)\n k = np.zeros(shape)\n\n r99 = psf.containment_angle(energies=egy, fraction=0.997)\n r34 = psf.containment_angle(energies=egy, fraction=0.34)\n\n rmin = np.maximum(r34 / 4., 0.01)\n rmax = np.maximum(r99, 0.1)\n if sigma is not None:\n rmin = np.maximum(rmin, 0.5 * sigma)\n rmax = np.maximum(rmax, 2.0 * r34 + 3.0 * sigma)\n rmax = np.minimum(rmax, max_ang_dist)\n\n for i in range(len(egy)):\n\n rebin = min(int(np.ceil(cdelt / rmin[i])), 8)\n if sparse:\n dtheta = np.linspace(0.0, rmax[i]**0.5, 100)**2.0\n else:\n dtheta = np.linspace(0.0, max_ang_dist**0.5, 200)**2.0\n\n z = eval_radial_kernel(psf, fn, sigma, i, dtheta, psf_scale_fn)\n xdist = make_pixel_distance(npix * rebin,\n xpix * rebin + (rebin - 1.0) / 2.,\n ypix * rebin + (rebin - 1.0) / 2.)\n xdist *= cdelt / float(rebin)\n #x = val_to_pix(dtheta, 
np.ravel(xdist))\n\n if sparse:\n m = np.ravel(xdist) < rmax[i]\n kk = np.zeros(xdist.size)\n #kk[m] = map_coordinates(z, [x[m]], order=2, prefilter=False)\n kk[m] = np.interp(np.ravel(xdist)[m], dtheta, z)\n kk = kk.reshape(xdist.shape)\n else:\n kk = np.interp(np.ravel(xdist), dtheta, z).reshape(xdist.shape)\n # kk = map_coordinates(z, [x], order=2,\n # prefilter=False).reshape(xdist.shape)\n\n if rebin > 1:\n kk = sum_bins(kk, 0, rebin)\n kk = sum_bins(kk, 1, rebin)\n\n k[i] = kk / float(rebin)**2\n\n k = k.reshape((len(egy),) + ang_dist.shape)\n if normalize:\n k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)\n\n return k"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ngenerate a kernel for a point - source.", "response": "def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False):\n \"\"\"\n Generate a kernel for a point-source.\n\n Parameters\n ----------\n\n psf : `~fermipy.irfs.PSFModel`\n\n npix : int\n Number of pixels in X and Y dimensions.\n\n cdelt : float\n Pixel size in degrees.\n\n \"\"\"\n\n egy = psf.energies\n x = make_pixel_distance(npix, xpix, ypix)\n x *= cdelt\n\n k = np.zeros((len(egy), npix, npix))\n for i in range(len(egy)):\n k[i] = psf.eval(i, x, scale_fn=psf_scale_fn)\n\n if normalize:\n k /= (np.sum(k, axis=0)[np.newaxis, ...] 
* np.radians(cdelt) ** 2)\n\n return k"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a tuple of slices that can be used to extract the overlapping part of a large array and a small array at the given position.", "response": "def overlap_slices(large_array_shape, small_array_shape, position):\n \"\"\"\n Modified version of `~astropy.nddata.utils.overlap_slices`.\n\n Get slices for the overlapping part of a small and a large array.\n\n Given a certain position of the center of the small array, with\n respect to the large array, tuples of slices are returned which can be\n used to extract, add or subtract the small array at the given\n position. This function takes care of the correct behavior at the\n boundaries, where the small array is cut of appropriately.\n\n Parameters\n ----------\n large_array_shape : tuple\n Shape of the large array.\n small_array_shape : tuple\n Shape of the small array.\n position : tuple\n Position of the small array's center, with respect to the large array.\n Coordinates should be in the same order as the array shape.\n\n Returns\n -------\n slices_large : tuple of slices\n Slices in all directions for the large array, such that\n ``large_array[slices_large]`` extracts the region of the large array\n that overlaps with the small array.\n slices_small : slice\n Slices in all directions for the small array, such that\n ``small_array[slices_small]`` extracts the region that is inside the\n large array.\n \"\"\"\n # Get edge coordinates\n edges_min = [int(pos - small_shape // 2) for (pos, small_shape) in\n zip(position, small_array_shape)]\n edges_max = [int(pos + (small_shape - small_shape // 2)) for\n (pos, small_shape) in\n zip(position, small_array_shape)]\n\n # Set up slices\n slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))\n for (edge_min, edge_max, large_shape) in\n zip(edges_min, edges_max, large_array_shape))\n slices_small = tuple(slice(max(0, -edge_min),\n min(large_shape 
- edge_min,\n edge_max - edge_min))\n for (edge_min, edge_max, large_shape) in\n zip(edges_min, edges_max, large_array_shape))\n\n return slices_large, slices_small"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nbuild and return a ModelManager object and fill the associated model library", "response": "def make_library(**kwargs):\n \"\"\"Build and return a ModelManager object and fill the associated model library\n \"\"\"\n library_yaml = kwargs.pop('library', 'models/library.yaml')\n comp_yaml = kwargs.pop('comp', 'config/binning.yaml')\n basedir = kwargs.pop('basedir', os.path.abspath('.'))\n\n model_man = kwargs.get('ModelManager', ModelManager(basedir=basedir))\n\n model_comp_dict = model_man.make_library(library_yaml, library_yaml, comp_yaml)\n\n return dict(model_comp_dict=model_comp_dict,\n ModelManager=model_man)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the list of source for which energy dispersion should be turned off", "response": "def edisp_disable_list(self):\n \"\"\" Return the list of source for which energy dispersion should be turned off \"\"\"\n l = []\n for model_comp in self.model_components.values():\n if model_comp.edisp_disable:\n l += [model_comp.info.source_name]\n return l"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef make_srcmap_manifest(self, components, name_factory):\n ret_dict = {}\n\n for comp in components:\n compkey = comp.make_key('{ebin_name}_{evtype_name}')\n zcut = \"zmax%i\" % comp.zmax\n name_keys = dict(modelkey=self.model_name,\n zcut=zcut,\n ebin=comp.ebin_name,\n mktime='none',\n psftype=comp.evtype_name,\n coordsys=comp.coordsys)\n outsrcmap = name_factory.merged_srcmaps(**name_keys)\n ccube = name_factory.ccube(**name_keys)\n src_dict = {}\n for comp_name, model_comp in self.model_components.items():\n comp_info = model_comp.info\n model_type = comp_info.model_type\n 
name_keys['sourcekey'] = comp_name\n if model_type in ['CatalogSources']:\n #sourcekey = comp_info.comp_key\n sources = comp_info.source_names\n name_keys['sourcekey'] = comp_info.catalog_info.catalog_name\n elif model_type in ['CompositeSource']:\n #sourcekey = comp_info.sourcekey\n name_keys['sourcekey'] = comp_info.sourcekey\n sources = [comp_info.source_name]\n else:\n #sourcekey = comp_name\n sources = [comp_info.source_name]\n src_dict[comp_name] = dict(sourcekey=comp_name,\n srcmap_file=name_factory.srcmaps(**name_keys),\n source_names=sources)\n comp_dict = dict(outsrcmap=outsrcmap,\n ccube=ccube,\n source_dict=src_dict)\n ret_dict[compkey] = comp_dict\n\n return ret_dict", "response": "Builds a manifest of the source maps for each component of the analysis."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncreate the ROI model objects for each of the binning components.", "response": "def make_model_rois(self, components, name_factory):\n \"\"\" Make the fermipy roi_model objects for each of a set of binning components \"\"\"\n ret_dict = {}\n\n # Figure out which sources need to be split by components\n master_roi_source_info = {}\n sub_comp_sources = {}\n for comp_name, model_comp in self.model_components.items():\n comp_info = model_comp.info\n if comp_info.components is None:\n master_roi_source_info[comp_name] = model_comp\n else:\n sub_comp_sources[comp_name] = model_comp\n\n # Build the xml for the master\n master_roi = SourceFactory.make_roi(master_roi_source_info)\n master_xml_mdl = name_factory.master_srcmdl_xml(\n modelkey=self.model_name)\n print(\"Writing master ROI model to %s\" % master_xml_mdl)\n master_roi.write_xml(master_xml_mdl)\n ret_dict['master'] = master_roi\n\n # Now deal with the components\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n compkey = \"%s_%s\" % (zcut, comp.make_key(\n '{ebin_name}_{evtype_name}'))\n # name_keys = dict(zcut=zcut,\n # modelkey=self.model_name,\n # 
component=compkey)\n comp_roi_source_info = {}\n for comp_name, model_comp in sub_comp_sources.items():\n comp_info = model_comp.info\n if comp_info.selection_dependent:\n key = comp.make_key('{ebin_name}_{evtype_name}')\n elif comp_info.moving:\n key = zcut\n info_clone = comp_info.components[key].clone_and_merge_sub(key)\n comp_roi_source_info[comp_name] =\\\n ModelComponent(info=info_clone,\n spectrum=model_comp.spectrum)\n\n # Build the xml for the component\n comp_roi = SourceFactory.make_roi(comp_roi_source_info)\n comp_xml_mdl = name_factory.comp_srcmdl_xml(modelkey=self.model_name,\n component=compkey)\n print(\"Writing component ROI model to %s\" % comp_xml_mdl)\n comp_roi.write_xml(comp_xml_mdl)\n ret_dict[compkey] = comp_roi\n\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nread the model yaml file for the diffuse components", "response": "def read_model_yaml(self, modelkey):\n \"\"\" Read the yaml file for the diffuse components\n \"\"\"\n model_yaml = self._name_factory.model_yaml(modelkey=modelkey,\n fullpath=True)\n model = yaml.safe_load(open(model_yaml))\n return model"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nbuilding the library of all the components", "response": "def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):\n \"\"\" Build up the library of all the components\n\n Parameters\n ----------\n\n diffuse_yaml : str\n Name of the yaml file with the library of diffuse component definitions\n catalog_yaml : str\n Name of the yaml file width the library of catalog split definitions\n binning_yaml : str\n Name of the yaml file with the binning definitions\n \"\"\"\n ret_dict = {}\n #catalog_dict = yaml.safe_load(open(catalog_yaml))\n components_dict = Component.build_from_yamlfile(binning_yaml)\n diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm,\n DiffuseModelManager=self._dmm,\n library=diffuse_yaml,\n 
components=components_dict)\n catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml,\n CatalogSourceManager=self._csm)\n ret_dict.update(diffuse_ret_dict['comp_info_dict'])\n ret_dict.update(catalog_ret_dict['comp_info_dict'])\n self._library.update(ret_dict)\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nbuilding a dictionary with the information for a particular model.", "response": "def make_model_info(self, modelkey):\n \"\"\" Build a dictionary with the information for a particular model.\n\n Parameters\n ----------\n\n modelkey : str\n Key used to identify this particular model\n\n Return `ModelInfo`\n \"\"\"\n model = self.read_model_yaml(modelkey)\n sources = model['sources']\n components = OrderedDict()\n spec_model_yaml = self._name_factory.fullpath(localpath=model['spectral_models'])\n self._spec_lib.update(yaml.safe_load(open(spec_model_yaml)))\n for source, source_info in sources.items():\n model_type = source_info.get('model_type', None)\n par_overrides = source_info.get('par_overides', None)\n version = source_info['version']\n spec_type = source_info['SpectrumType']\n edisp_disable = source_info.get('edisp_disable', False)\n sourcekey = \"%s_%s\" % (source, version)\n if model_type == 'galprop_rings':\n comp_info_dict = self.gmm.diffuse_comp_info_dicts(version)\n def_spec_type = spec_type['default']\n for comp_key, comp_info in comp_info_dict.items():\n model_comp = ModelComponent(info=comp_info,\n spectrum=\\\n self._spec_lib[spec_type.get(comp_key,\n def_spec_type)],\n par_overrides=par_overrides,\n edisp_disable=edisp_disable)\n components[comp_key] = model_comp\n elif model_type == 'Catalog':\n comp_info_dict = self.csm.split_comp_info_dict(source, version)\n def_spec_type = spec_type['default']\n for comp_key, comp_info in comp_info_dict.items():\n model_comp = ModelComponent(info=comp_info,\n spectrum=\\\n self._spec_lib[spec_type.get(comp_key,\n def_spec_type)],\n 
par_overrides=par_overrides,\n edisp_disable=edisp_disable)\n components[comp_key] = model_comp\n else:\n comp_info = self.dmm.diffuse_comp_info(sourcekey)\n model_comp = ModelComponent(info=comp_info,\n spectrum=self._spec_lib[spec_type],\n par_overrides=par_overrides,\n edisp_disable=edisp_disable)\n components[sourcekey] = model_comp\n ret_val = ModelInfo(model_name=modelkey,\n model_components=components)\n self._models[modelkey] = ret_val\n return ret_val"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef make_srcmap_manifest(self, modelkey, components, data):\n try:\n model_info = self._models[modelkey]\n except KeyError:\n model_info = self.make_model_info(modelkey)\n self._name_factory.update_base_dict(data)\n outfile = os.path.join('analysis', 'model_%s' %\n modelkey, 'srcmap_manifest_%s.yaml' % modelkey)\n manifest = model_info.make_srcmap_manifest(\n components, self._name_factory)\n\n outdir = os.path.dirname(outfile)\n try:\n os.makedirs(outdir)\n except OSError:\n pass\n utils.write_yaml(manifest, outfile)", "response": "Builds a yaml file that specfies how to make the srcmap files for a particular model."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef make_fermipy_config_yaml(self, modelkey, components, data, **kwargs):\n model_dir = os.path.join('analysis', 'model_%s' % modelkey)\n hpx_order = kwargs.get('hpx_order', 9)\n self._name_factory.update_base_dict(data)\n try:\n model_info = self._models[modelkey]\n except KeyError:\n model_info = self.make_model_info(modelkey)\n\n model_rois = model_info.make_model_rois(components, self._name_factory)\n\n #source_names = model_info.component_names\n\n master_xml_mdl = self._name_factory.master_srcmdl_xml(modelkey=modelkey, fullpath=True)\n\n master_data = dict(scfile=self._name_factory.ft2file(fullpath=True),\n cacheft1=False)\n master_binning = dict(projtype='HPX',\n 
roiwidth=360.,\n binsperdec=4,\n coordsys='GAL',\n hpx_ordering_scheme=\"RING\",\n hpx_order=hpx_order,\n hpx_ebin=True)\n # master_fileio = dict(outdir=model_dir,\n # logfile=os.path.join(model_dir, 'fermipy.log'))\n master_fileio = dict(logfile='fermipy.log')\n master_gtlike = dict(irfs=self._name_factory.irfs(**kwargs),\n edisp_disable=model_info.edisp_disable_list(),\n use_external_srcmap=True)\n master_selection = dict(glat=0., glon=0., radius=180.)\n master_model = dict(catalogs=[os.path.join(model_dir, master_xml_mdl)])\n master_plotting = dict(label_ts_threshold=1e9)\n\n master = dict(data=master_data,\n binning=master_binning,\n fileio=master_fileio,\n selection=master_selection,\n gtlike=master_gtlike,\n model=master_model,\n plotting=master_plotting,\n components=[])\n\n fermipy_dict = master\n\n #comp_rois = {}\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n compkey = \"%s_%s\" % (zcut, comp.make_key(\n '{ebin_name}_{evtype_name}'))\n comp_roi = model_rois[compkey]\n name_keys = dict(zcut=zcut,\n modelkey=modelkey,\n component=compkey,\n mktime='none',\n coordsys=comp.coordsys,\n fullpath=True)\n comp_data = dict(ltcube=self._name_factory.ltcube(**name_keys))\n comp_selection = dict(logemin=comp.log_emin,\n logemax=comp.log_emax,\n zmax=comp.zmax,\n evtype=comp.evtype)\n comp_binning = dict(enumbins=comp.enumbins,\n hpx_order=min(comp.hpx_order, hpx_order),\n coordsys=comp.coordsys)\n comp_gtlike = dict(srcmap=self._name_factory.merged_srcmaps(**name_keys),\n bexpmap=self._name_factory.bexpcube(**name_keys),\n use_external_srcmap=True)\n comp_diffuse_xml = [self._name_factory.comp_srcmdl_xml(**name_keys)]\n #comp_roi_source_info = {}\n #diffuse_srcs = []\n #for src in comp_roi.diffuse_sources:\n # if isinstance(src, MapCubeSource):\n # diffuse_srcs.append(dict(name=src.name, file=src.mapcube))\n # elif isinstance(src, IsoSource):\n # diffuse_srcs.append(dict(name=src.name, file=src.fileFunction))\n # else:\n # pass\n\n comp_model = 
dict(diffuse_xml=comp_diffuse_xml)\n sub_dict = dict(data=comp_data,\n binning=comp_binning,\n selection=comp_selection,\n gtlike=comp_gtlike,\n model=comp_model)\n fermipy_dict['components'].append(sub_dict)\n\n # Add placeholder diffuse sources\n #fermipy_dict['model']['diffuse'] = diffuse_srcs\n\n outfile = os.path.join(model_dir, 'config.yaml')\n print(\"Writing fermipy config file %s\" % outfile)\n utils.write_yaml(fermipy_dict, outfile)\n return fermipy_dict", "response": "Build a fermipy top - level yaml configuration file for a particular model."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_sub_comp_info(source_info, comp):\n sub_comps = source_info.get('components', None)\n if sub_comps is None:\n return source_info.copy()\n moving = source_info.get('moving', False)\n selection_dependent = source_info.get('selection_dependent', False)\n if selection_dependent:\n key = comp.make_key('{ebin_name}_{evtype_name}')\n elif moving:\n key = \"zmax%i\" % comp.zmax\n ret_dict = source_info.copy()\n ret_dict.update(sub_comps[key])\n return ret_dict", "response": "Build and return information about a sub - component for a particular selection\n "} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nsubstitute aliases in a cut dictionary.", "response": "def replace_aliases(cut_dict, aliases):\n \"\"\"Substitute aliases in a cut dictionary.\"\"\"\n for k, v in cut_dict.items():\n for k0, v0 in aliases.items():\n cut_dict[k] = cut_dict[k].replace(k0, '(%s)' % v0)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nextracting a list of file paths from a list containing both paths and file lists with one path per line.", "response": "def get_files(files, extnames=['.root']):\n \"\"\"Extract a list of file paths from a list containing both paths\n and file lists with one path per line.\"\"\"\n\n files_out = []\n for f in files:\n\n mime = 
mimetypes.guess_type(f)\n if os.path.splitext(f)[1] in extnames:\n files_out += [f]\n elif mime[0] == 'text/plain':\n files_out += list(np.loadtxt(f, unpack=True, dtype='str'))\n else:\n raise Exception('Unrecognized input type.')\n\n return files_out"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nextracts event selection strings from the XML file.", "response": "def get_cuts_from_xml(xmlfile):\n \"\"\"Extract event selection strings from the XML file.\"\"\"\n\n root = ElementTree.ElementTree(file=xmlfile).getroot()\n event_maps = root.findall('EventMap')\n alias_maps = root.findall('AliasDict')[0]\n\n event_classes = {}\n event_types = {}\n event_aliases = {}\n\n for m in event_maps:\n if m.attrib['altName'] == 'EVENT_CLASS':\n for c in m.findall('EventCategory'):\n event_classes[c.attrib['name']] = strip(\n c.find('ShortCut').text)\n elif m.attrib['altName'] == 'EVENT_TYPE':\n for c in m.findall('EventCategory'):\n event_types[c.attrib['name']] = strip(c.find('ShortCut').text)\n\n for m in alias_maps.findall('Alias'):\n event_aliases[m.attrib['name']] = strip(m.text)\n\n replace_aliases(event_aliases, event_aliases.copy())\n replace_aliases(event_aliases, event_aliases.copy())\n replace_aliases(event_classes, event_aliases)\n replace_aliases(event_types, event_aliases)\n\n event_selections = {}\n event_selections.update(event_classes)\n event_selections.update(event_types)\n event_selections.update(event_aliases)\n\n return event_selections"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef set_event_list(tree, selection=None, fraction=None, start_fraction=None):\n import ROOT\n\n elist = rand_str()\n\n if selection is None:\n cuts = ''\n else:\n cuts = selection\n\n if fraction is None or fraction >= 1.0:\n n = tree.Draw(\">>%s\" % elist, cuts, \"goff\")\n tree.SetEventList(ROOT.gDirectory.Get(elist))\n elif start_fraction is None:\n nentries = 
int(tree.GetEntries())\n first_entry = min(int((1.0 - fraction) * nentries), nentries)\n n = tree.Draw(\">>%s\" % elist, cuts, \"goff\", nentries, first_entry)\n tree.SetEventList(ROOT.gDirectory.Get(elist))\n else:\n nentries = int(tree.GetEntries())\n first_entry = min(int(start_fraction * nentries), nentries)\n n = first_entry + int(nentries * fraction)\n n = tree.Draw(\">>%s\" % elist, cuts, \"goff\",\n n - first_entry, first_entry)\n tree.SetEventList(ROOT.gDirectory.Get(elist))\n\n return n", "response": "Sets the event list for a tree or chain."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef localize(self, name, **kwargs):\n timer = Timer.create(start=True)\n name = self.roi.get_source_by_name(name).name\n\n schema = ConfigSchema(self.defaults['localize'],\n optimizer=self.defaults['optimizer'])\n schema.add_option('use_cache', True)\n schema.add_option('prefix', '')\n config = utils.create_dict(self.config['localize'],\n optimizer=self.config['optimizer'])\n config = schema.create_config(config, **kwargs)\n\n self.logger.info('Running localization for %s' % name)\n\n free_state = FreeParameterState(self)\n loc = self._localize(name, **config)\n free_state.restore()\n\n self.logger.info('Finished localization.')\n\n if config['make_plots']:\n self._plotter.make_localization_plots(loc, self.roi,\n prefix=config['prefix'])\n\n outfile = \\\n utils.format_filename(self.workdir, 'loc',\n prefix=[config['prefix'],\n name.lower().replace(' ', '_')])\n\n if config['write_fits']:\n loc['file'] = os.path.basename(outfile) + '.fits'\n self._make_localize_fits(loc, outfile + '.fits',\n **config)\n\n if config['write_npy']:\n np.save(outfile + '.npy', dict(loc))\n\n self.logger.info('Execution time: %.2f s', timer.elapsed_time)\n return loc", "response": "This function finds the best fit position of a source and returns a dictionary containing the results of the localization analysis."} {"SOURCE": 
"codesearchnet", "instruction": "Implement a Python 3 function for\nlocalizing a source from its TS map.", "response": "def _fit_position_tsmap(self, name, **kwargs):\n \"\"\"Localize a source from its TS map.\"\"\"\n\n prefix = kwargs.get('prefix', '')\n dtheta_max = kwargs.get('dtheta_max', 0.5)\n zmin = kwargs.get('zmin', -3.0)\n\n kw = {\n 'map_size': 2.0 * dtheta_max,\n 'write_fits': kwargs.get('write_fits', False),\n 'write_npy': kwargs.get('write_npy', False),\n 'use_pylike': kwargs.get('use_pylike', True),\n 'max_kernel_radius': self.config['tsmap']['max_kernel_radius'],\n 'loglevel': logging.DEBUG\n }\n\n src = self.roi.copy_source(name)\n\n if src['SpatialModel'] in ['RadialDisk', 'RadialGaussian']:\n kw['max_kernel_radius'] = max(kw['max_kernel_radius'],\n 2.0 * src['SpatialWidth'])\n\n skydir = kwargs.get('skydir', src.skydir)\n tsmap = self.tsmap(utils.join_strings([prefix, name.lower().\n replace(' ', '_')]),\n model=src.data,\n map_skydir=skydir,\n exclude=[name],\n make_plots=False, **kw)\n\n # Find peaks with TS > 4\n peaks = find_peaks(tsmap['ts'], 4.0, 0.2)\n peak_best = None\n o = {}\n for p in sorted(peaks, key=lambda t: t['amp'], reverse=True):\n xy = p['ix'], p['iy']\n ts_value = tsmap['ts'].data[xy[1], xy[0]]\n posfit = fit_error_ellipse(tsmap['ts'], xy=xy, dpix=2,\n zmin=max(zmin, -ts_value * 0.5))\n offset = posfit['skydir'].separation(self.roi[name].skydir).deg\n if posfit['fit_success'] and posfit['fit_inbounds']:\n peak_best = p\n break\n\n if peak_best is None:\n ts_value = np.max(tsmap['ts'].data)\n posfit = fit_error_ellipse(tsmap['ts'], dpix=2,\n zmin=max(zmin, -ts_value * 0.5))\n\n o.update(posfit)\n pix = posfit['skydir'].to_pixel(self.geom.wcs)\n o['xpix'] = float(pix[0])\n o['ypix'] = float(pix[1])\n o['skydir'] = posfit['skydir'].transform_to('icrs')\n o['pos_offset'] = posfit['skydir'].separation(\n self.roi[name].skydir).deg\n o['loglike'] = 0.5 * posfit['zoffset']\n o['tsmap'] = tsmap['ts']\n\n return o"} {"SOURCE": 
"codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef make_nfs_path(path):\n if os.path.isabs(path):\n fullpath = path\n else:\n fullpath = os.path.abspath(path)\n if len(fullpath) < 6:\n return fullpath\n if fullpath[0:6] == '/gpfs/':\n fullpath = fullpath.replace('/gpfs/', '/nfs/')\n return fullpath", "response": "Make a nfs version of a file path."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmakes a gpfs version of a file path.", "response": "def make_gpfs_path(path):\n \"\"\"Make a gpfs version of a file path.\n This just puts /gpfs at the beginning instead of /nfs\"\"\"\n if os.path.isabs(path):\n fullpath = os.path.abspath(path)\n else:\n fullpath = os.path.abspath(path)\n if len(fullpath) < 5:\n return fullpath\n if fullpath[0:5] == '/nfs/':\n fullpath = fullpath.replace('/nfs/', '/gpfs/')\n return fullpath"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncounting and print the number of jobs in various LSF states", "response": "def get_lsf_status():\n \"\"\"Count and print the number of jobs in various LSF states\n \"\"\"\n status_count = {'RUN': 0,\n 'PEND': 0,\n 'SUSP': 0,\n 'USUSP': 0,\n 'NJOB': 0,\n 'UNKNWN': 0}\n\n try:\n subproc = subprocess.Popen(['bjobs'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n subproc.stderr.close()\n output = subproc.stdout.readlines()\n except OSError:\n return status_count\n\n for line in output[1:]:\n line = line.strip().split()\n # Protect against format of multiproc jobs\n if len(line) < 5: \n continue\n\n status_count['NJOB'] += 1\n\n for k in status_count:\n if line[2] == k:\n status_count[k] += 1\n\n return status_count"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef build_bsub_command(command_template, lsf_args):\n if command_template is None:\n return \"\"\n full_command = 'bsub -o {logfile}'\n for key, value in lsf_args.items():\n 
full_command += ' -%s' % key\n if value is not None:\n full_command += ' %s' % value\n full_command += ' %s' % command_template\n return full_command", "response": "Build and return a lsf batch command template"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_slac_default_args(job_time=1500):\n slac_default_args = dict(lsf_args={'W': job_time,\n 'R': '\\\"select[rhel60&&!fell]\\\"'},\n max_jobs=500,\n time_per_cycle=15,\n jobs_per_cycle=20,\n max_job_age=90,\n no_batch=False)\n return slac_default_args.copy()", "response": "Create a default set of arguments for the batch job interface."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):\n full_sub_dict = job_config.copy()\n\n if self._no_batch:\n full_command = \"%s >& %s\" % (\n link.command_template().format(**full_sub_dict), logfile)\n else:\n full_sub_dict['logfile'] = logfile\n full_command_template = build_bsub_command(\n link.command_template(), self._lsf_args)\n full_command = full_command_template.format(**full_sub_dict)\n\n logdir = os.path.dirname(logfile)\n\n print_bsub = True\n if self._dry_run:\n if print_bsub:\n stream.write(\"%s\\n\" % full_command)\n return 0\n\n try:\n os.makedirs(logdir)\n except OSError:\n pass\n proc = subprocess.Popen(full_command.split(),\n stderr=stream,\n stdout=stream)\n proc.communicate()\n return proc.returncode", "response": "Dispatches a single job to the LSF batch"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nsubmit all the jobs in job_dict.", "response": "def submit_jobs(self, link, job_dict=None, job_archive=None, stream=sys.stdout):\n \"\"\"Submit all the jobs in job_dict \"\"\"\n if link is None:\n return JobStatus.no_job\n if job_dict is None:\n job_keys = link.jobs.keys()\n else:\n job_keys = sorted(job_dict.keys())\n\n # 
copy & reverse the keys b/c we will be popping item off the back of\n # the list\n unsubmitted_jobs = job_keys\n unsubmitted_jobs.reverse()\n\n failed = False\n if unsubmitted_jobs:\n if stream != sys.stdout:\n sys.stdout.write('Submitting jobs (%i): ' %\n len(unsubmitted_jobs))\n sys.stdout.flush()\n while unsubmitted_jobs:\n status = get_lsf_status()\n njob_to_submit = min(self._max_jobs - status['NJOB'],\n self._jobs_per_cycle,\n len(unsubmitted_jobs))\n\n if self._dry_run:\n njob_to_submit = len(unsubmitted_jobs)\n\n for i in range(njob_to_submit):\n job_key = unsubmitted_jobs.pop()\n\n # job_details = job_dict[job_key]\n job_details = link.jobs[job_key]\n job_config = job_details.job_config\n if job_details.status == JobStatus.failed:\n clean_job(job_details.logfile, {}, self._dry_run)\n # clean_job(job_details.logfile,\n # job_details.outfiles, self.args['dry_run'])\n\n job_config['logfile'] = job_details.logfile\n new_job_details = self.dispatch_job(\n link, job_key, job_archive, stream)\n if new_job_details.status == JobStatus.failed:\n failed = True\n clean_job(new_job_details.logfile,\n new_job_details.outfiles, self._dry_run)\n link.jobs[job_key] = new_job_details\n\n if unsubmitted_jobs:\n if stream != sys.stdout:\n sys.stdout.write('.')\n sys.stdout.flush()\n stream.write('Sleeping %.0f seconds between submission cycles\\n' %\n self._time_per_cycle)\n time.sleep(self._time_per_cycle)\n\n if failed:\n return JobStatus.failed\n\n if stream != sys.stdout:\n sys.stdout.write('!\\n')\n\n return JobStatus.done"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nloads an FT2 file from a file or list of files.", "response": "def create_sc_table(scfile, colnames=None):\n \"\"\"Load an FT2 file from a file or list of files.\"\"\"\n\n if utils.is_fits_file(scfile) and colnames is None:\n return create_table_from_fits(scfile, 'SC_DATA')\n\n if utils.is_fits_file(scfile):\n files = [scfile]\n else:\n files = 
[line.strip() for line in open(scfile, 'r')]\n\n tables = [create_table_from_fits(f, 'SC_DATA', colnames)\n for f in files]\n\n return vstack(tables)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncomputes the local spectral index of a source.", "response": "def get_spectral_index(src, egy):\n \"\"\"Compute the local spectral index of a source.\"\"\"\n delta = 1E-5\n f0 = src.spectrum()(pyLike.dArg(egy * (1 - delta)))\n f1 = src.spectrum()(pyLike.dArg(egy * (1 + delta)))\n\n if f0 > 0 and f1 > 0:\n gamma = np.log10(f0 / f1) / np.log10((1 - delta) / (1 + delta))\n else:\n gamma = np.nan\n\n return gamma"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create(cls, infile, config=None, params=None, mask=None):\n\n infile = os.path.abspath(infile)\n roi_file, roi_data = utils.load_data(infile)\n\n if config is None:\n config = roi_data['config']\n validate = False\n else:\n validate = True\n\n gta = cls(config, validate=validate)\n gta.setup(init_sources=False)\n gta.load_roi(infile, params=params, mask=mask)\n return gta", "response": "Create a new GTAnalysis from an analysis output file."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmake a clone of this analysis instance.", "response": "def clone(self, config, **kwargs):\n \"\"\"Make a clone of this analysis instance.\"\"\"\n gta = GTAnalysis(config, **kwargs)\n gta._roi = copy.deepcopy(self.roi)\n return gta"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef set_random_seed(self, seed):\n self.config['mc']['seed'] = seed\n np.random.seed(seed)", "response": "Set the seed for the random number generator"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ndeletes and reload a source in the model.", "response": "def reload_source(self, name, init_source=True):\n 
\"\"\"Delete and reload a source in the model. This will update\n the spatial model of this source to the one defined in the XML\n model.\"\"\"\n\n for c in self.components:\n c.reload_source(name)\n\n if init_source:\n self._init_source(name)\n\n self.like.model = self.like.components[0].model"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nset the spatial model of a source.", "response": "def set_source_morphology(self, name, **kwargs):\n \"\"\"Set the spatial model of a source.\n\n Parameters\n ----------\n name : str\n Source name.\n\n spatial_model : str\n Spatial model name (PointSource, RadialGaussian, etc.).\n\n spatial_pars : dict\n Dictionary of spatial parameters (optional).\n\n use_cache : bool \n Generate the spatial model by interpolating the cached source\n map.\n\n use_pylike : bool\n\n \"\"\"\n\n name = self.roi.get_source_by_name(name).name\n src = self.roi[name]\n\n spatial_model = kwargs.get('spatial_model', src['SpatialModel'])\n spatial_pars = kwargs.get('spatial_pars', {})\n use_pylike = kwargs.get('use_pylike', True)\n psf_scale_fn = kwargs.get('psf_scale_fn', None)\n update_source = kwargs.get('update_source', False)\n\n if hasattr(pyLike.BinnedLikelihood, 'setSourceMapImage') and not use_pylike:\n src.set_spatial_model(spatial_model, spatial_pars)\n self._update_srcmap(src.name, src, psf_scale_fn=psf_scale_fn)\n else:\n src = self.delete_source(name, loglevel=logging.DEBUG,\n save_template=False)\n src.set_spatial_model(spatial_model, spatial_pars)\n self.add_source(src.name, src, init_source=False,\n use_pylike=use_pylike, loglevel=logging.DEBUG)\n\n if update_source:\n self.update_source(name)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nsets the spectral model of a source. This function can be used to change the spectral type of a source or modify its spectral parameters. 
If called with spectrum_type='FileFunction' and spectrum_pars=None, the source spectrum will be replaced with a FileFunction with the same differential flux distribution as the original spectrum. Parameters ---------- name : str Source name. spectrum_type : str Spectrum type (PowerLaw, etc.). spectrum_pars : dict Dictionary of spectral parameters (optional). update_source : bool Recompute all source characteristics (flux, TS, NPred) using the new spectral model of the source.", "response": "def set_source_spectrum(self, name, spectrum_type='PowerLaw',\n spectrum_pars=None, update_source=True):\n \"\"\"Set the spectral model of a source. This function can be\n used to change the spectral type of a source or modify its\n spectral parameters. If called with\n spectrum_type='FileFunction' and spectrum_pars=None, the\n source spectrum will be replaced with a FileFunction with the\n same differential flux distribution as the original spectrum.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n spectrum_type : str\n Spectrum type (PowerLaw, etc.).\n\n spectrum_pars : dict\n Dictionary of spectral parameters (optional).\n\n update_source : bool\n Recompute all source characteristics (flux, TS, NPred)\n using the new spectral model of the source.\n\n \"\"\"\n name = self.roi.get_source_by_name(name).name\n src = self.roi[name]\n\n spectrum_pars = {} if spectrum_pars is None else spectrum_pars\n if (self.roi[name]['SpectrumType'] == 'PowerLaw' and\n spectrum_type == 'LogParabola'):\n spectrum_pars.setdefault('beta', {'value': 0.0, 'scale': 1.0,\n 'min': 0.0, 'max': 1.0})\n spectrum_pars.setdefault('Eb', src.spectral_pars['Scale'])\n spectrum_pars.setdefault('norm', src.spectral_pars['Prefactor'])\n if 'alpha' not in spectrum_pars:\n spectrum_pars['alpha'] = src.spectral_pars['Index']\n spectrum_pars['alpha']['value'] *= -1.0\n if spectrum_pars['alpha']['scale'] == -1.0:\n spectrum_pars['alpha']['value'] *= -1.0\n spectrum_pars['alpha']['scale'] *= -1.0\n\n if 
spectrum_type == 'FileFunction':\n self._create_filefunction(name, spectrum_pars)\n else:\n fn = gtutils.create_spectrum_from_dict(spectrum_type,\n spectrum_pars)\n self.like.setSpectrum(str(name), fn)\n\n # Get parameters\n src = self.components[0].like.logLike.getSource(str(name))\n pars_dict = gtutils.get_function_pars_dict(src.spectrum())\n\n self.roi[name]['SpectrumType'] = spectrum_type\n self.roi[name].set_spectral_pars(pars_dict)\n for c in self.components:\n c.roi[name]['SpectrumType'] = spectrum_type\n c.roi[name].set_spectral_pars(pars_dict)\n\n if update_source:\n self.update_source(name)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting the differential flux distribution of a source with the FileFunction spectral type.", "response": "def set_source_dnde(self, name, dnde, update_source=True):\n \"\"\"Set the differential flux distribution of a source with the\n FileFunction spectral type.\n\n Parameters\n ----------\n name : str\n Source name.\n\n dnde : `~numpy.ndarray`\n Array of differential flux values (cm^{-2} s^{-1} MeV^{-1}).\n \"\"\"\n name = self.roi.get_source_by_name(name).name\n\n if self.roi[name]['SpectrumType'] != 'FileFunction':\n msg = 'Wrong spectral type: %s' % self.roi[name]['SpectrumType']\n self.logger.error(msg)\n raise Exception(msg)\n\n xy = self.get_source_dnde(name)\n\n if len(dnde) != len(xy[0]):\n msg = 'Wrong length for dnde array: %i' % len(dnde)\n self.logger.error(msg)\n raise Exception(msg)\n\n for c in self.components:\n src = c.like.logLike.getSource(str(name))\n spectrum = src.spectrum()\n file_function = pyLike.FileFunction_cast(spectrum)\n file_function.setSpectrum(10**xy[0], dnde)\n\n if update_source:\n self.update_source(name)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _create_filefunction(self, name, spectrum_pars):\n\n spectrum_pars = {} if spectrum_pars is None else spectrum_pars\n\n if 'loge' in 
spectrum_pars:\n loge = spectrum_pars.get('loge')\n else:\n ebinsz = (self.log_energies[-1] -\n self.log_energies[0]) / self.enumbins\n loge = utils.extend_array(self.log_energies, ebinsz, 0.5, 6.5)\n\n # Get the values\n dnde = np.zeros(len(loge))\n if 'dnde' in spectrum_pars:\n dnde = spectrum_pars.get('dnde')\n else:\n dnde = np.array([self.like[name].spectrum()(pyLike.dArg(10 ** egy))\n for egy in loge])\n\n filename = \\\n os.path.join(self.workdir,\n '%s_filespectrum.txt' % (name.lower().replace(' ', '_')))\n\n # Create file spectrum txt file\n np.savetxt(filename, np.vstack((10**loge, dnde)).T)\n self.like.setSpectrum(name, str('FileFunction'))\n\n self.roi[name]['Spectrum_Filename'] = filename\n # Update\n for c in self.components:\n src = c.like.logLike.getSource(str(name))\n spectrum = src.spectrum()\n\n spectrum.getParam(str('Normalization')).setBounds(1E-3, 1E3)\n\n file_function = pyLike.FileFunction_cast(spectrum)\n file_function.readFunction(str(filename))\n c.roi[name]['Spectrum_Filename'] = filename", "response": "Create the file function for an existing source."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef stage_output(self):\n\n if self.workdir == self.outdir:\n return\n elif not os.path.isdir(self.workdir):\n self.logger.error('Working directory does not exist.')\n return\n\n regex = self.config['fileio']['outdir_regex']\n savefits = self.config['fileio']['savefits']\n files = os.listdir(self.workdir)\n self.logger.info('Staging files to %s', self.outdir)\n\n fitsfiles = []\n for c in self.components:\n for f in c.files.values():\n\n if f is None:\n continue\n\n fitsfiles += [os.path.basename(f)]\n\n for f in files:\n\n wpath = os.path.join(self.workdir, f)\n opath = os.path.join(self.outdir, f)\n\n if not utils.match_regex_list(regex, os.path.basename(f)):\n continue\n if os.path.isfile(opath) and filecmp.cmp(wpath, opath, False):\n continue\n if not savefits and f in fitsfiles:\n 
continue\n\n\n self.logger.debug('Copying ' + f)\n self.logger.info('Copying ' + f)\n shutil.copy(wpath, self.outdir)\n\n self.logger.info('Finished.')", "response": "Copy data products to final output directory."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef stage_input(self):\n\n if self.workdir == self.outdir:\n return\n elif not os.path.isdir(self.workdir):\n self.logger.error('Working directory does not exist.')\n return\n\n self.logger.info('Staging files to %s', self.workdir)\n\n files = [os.path.join(self.outdir, f)\n for f in os.listdir(self.outdir)]\n\n regex = copy.deepcopy(self.config['fileio']['workdir_regex'])\n\n for f in files:\n\n if not os.path.isfile(f):\n continue\n if not utils.match_regex_list(regex, os.path.basename(f)):\n continue\n\n self.logger.debug('Copying ' + os.path.basename(f))\n shutil.copy(f, self.workdir)\n\n for c in self.components:\n for f in c.files.values():\n\n if f is None:\n continue\n\n wpath = os.path.join(self.workdir, os.path.basename(f))\n opath = os.path.join(self.outdir, os.path.basename(f))\n\n if os.path.isfile(wpath):\n continue\n elif os.path.isfile(opath):\n self.logger.debug('Copying ' + os.path.basename(f))\n shutil.copy(opath, self.workdir)\n\n self.logger.info('Finished.')", "response": "Copy input files to working directory."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef setup(self, init_sources=True, overwrite=False, **kwargs):\n\n loglevel = kwargs.get('loglevel', self.loglevel)\n\n self.logger.log(loglevel, 'Running setup.')\n\n # Make spatial maps for extended sources\n for s in self.roi.sources:\n if s.diffuse:\n continue\n if not s.extended:\n continue\n self.make_template(s)\n\n # Run setup for each component\n for i, c in enumerate(self.components):\n c.setup(overwrite=overwrite)\n\n # Create likelihood\n self._create_likelihood()\n\n # Determine 
tmin, tmax\n for i, c in enumerate(self._components):\n self._tmin = (c.tmin if self._tmin is None\n else min(self._tmin, c.tmin))\n self._tmax = (c.tmax if self._tmax is None\n else min(self._tmax, c.tmax))\n\n if init_sources:\n\n self.logger.log(loglevel, 'Initializing source properties')\n for name in self.like.sourceNames():\n self.logger.debug('Initializing source %s', name)\n self._init_source(name)\n self._update_roi()\n\n self.logger.log(loglevel, 'Finished setup.')", "response": "This function performs pre - processing for each analysis component and construct a joint likelihood object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _create_likelihood(self, srcmdl=None):\n\n self._like = SummedLikelihood()\n for c in self.components:\n c._create_binned_analysis(srcmdl)\n self._like.addComponent(c.like)\n\n self.like.model = self.like.components[0].model\n self._fitcache = None\n self._init_roi_model()", "response": "Instantiate the likelihood object for each component and create a SummedLikelihood object."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef generate_model(self, model_name=None):\n\n for i, c in enumerate(self._components):\n c.generate_model(model_name=model_name)", "response": "Generate model maps for all components."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsetting the energy bounds of the analysis. This restricts the evaluation of the likelihood to the data that falls in this range. Input values will be rounded to the closest bin edge value. If either argument is None then the lower or upper bound of the analysis instance will be used. Parameters ---------- logemin : float Lower energy bound in log10(E/MeV). logemax : float Upper energy bound in log10(E/MeV). 
Returns ------- eminmax : array Minimum and maximum energy in log10(E/MeV).", "response": "def set_energy_range(self, logemin, logemax):\n \"\"\"Set the energy bounds of the analysis. This restricts the\n evaluation of the likelihood to the data that falls in this\n range. Input values will be rounded to the closest bin edge\n value. If either argument is None then the lower or upper\n bound of the analysis instance will be used.\n\n Parameters\n ----------\n\n logemin : float\n Lower energy bound in log10(E/MeV).\n\n logemax : float\n Upper energy bound in log10(E/MeV).\n\n Returns\n -------\n\n eminmax : array\n Minimum and maximum energy in log10(E/MeV).\n\n \"\"\"\n\n if logemin is None:\n logemin = self.log_energies[0]\n else:\n imin = int(utils.val_to_edge(self.log_energies, logemin)[0])\n logemin = self.log_energies[imin]\n\n if logemax is None:\n logemax = self.log_energies[-1]\n else:\n imax = int(utils.val_to_edge(self.log_energies, logemax)[0])\n logemax = self.log_energies[imax]\n\n self._loge_bounds = np.array([logemin, logemax])\n self._roi_data['loge_bounds'] = np.copy(self.loge_bounds)\n for c in self.components:\n c.set_energy_range(logemin, logemax)\n\n return self._loge_bounds"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the model counts map for a single source a list of sources or for the sum of all sources in the ROI.", "response": "def model_counts_map(self, name=None, exclude=None, use_mask=False):\n \"\"\"Return the model counts map for a single source, a list of\n sources, or for the sum of all sources in the ROI. The\n exclude parameter can be used to exclude one or more\n components when generating the model map.\n\n Parameters\n ----------\n name : str or list of str\n\n Parameter controlling the set of sources for which the\n model counts map will be calculated. 
If name=None the\n model map will be generated for all sources in the ROI.\n\n exclude : str or list of str\n\n List of sources that will be excluded when calculating the\n model map.\n\n use_mask : bool\n\n Parameter that specifies in the model counts map should include\n mask pixels (i.e., ones whose weights are <= 0)\n\n Returns\n -------\n map : `~gammapy.maps.Map`\n \"\"\" \n maps = [c.model_counts_map(name, exclude, use_mask=use_mask)\n for c in self.components]\n return skymap.coadd_maps(self.geom, maps)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the predicted number of model counts versus energy for a given source and energy range.", "response": "def model_counts_spectrum(self, name, logemin=None, logemax=None,\n summed=False, weighted=False):\n \"\"\"Return the predicted number of model counts versus energy\n for a given source and energy range. If summed=True return\n the counts spectrum summed over all components otherwise\n return a list of model spectra. 
If weighted=True return \n the weighted version of the counts spectrum\n \"\"\"\n\n if logemin is None:\n logemin = self.log_energies[0]\n if logemax is None:\n logemax = self.log_energies[-1]\n\n if summed:\n cs = np.zeros(self.enumbins)\n imin = utils.val_to_bin_bounded(self.log_energies,\n logemin + 1E-7)[0]\n imax = utils.val_to_bin_bounded(self.log_energies,\n logemax - 1E-7)[0] + 1\n\n for c in self.components:\n ecenter = 0.5 * (c.log_energies[:-1] + c.log_energies[1:])\n counts = c.model_counts_spectrum(name, self.log_energies[0],\n self.log_energies[-1], weighted)\n cs += np.histogram(ecenter,\n weights=counts,\n bins=self.log_energies)[0]\n\n return cs[imin:imax]\n else:\n cs = []\n for c in self.components:\n cs += [c.model_counts_spectrum(name, logemin,\n logemax, weighted=weighted)]\n return cs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nretrieving list of sources in the ROI satisfying the given cuts distance skydir and exclude.", "response": "def get_sources(self, cuts=None, distance=None, skydir=None,\n minmax_ts=None, minmax_npred=None, exclude=None,\n square=False):\n \"\"\"Retrieve list of sources in the ROI satisfying the given\n selections.\n\n Returns\n -------\n srcs : list\n A list of `~fermipy.roi_model.Model` objects.\n\n \"\"\"\n\n coordsys = self.config['binning']['coordsys']\n return self.roi.get_sources(skydir, distance, cuts,\n minmax_ts, minmax_npred,\n exclude, square,\n coordsys=coordsys)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nadding a source to the ROI model.", "response": "def add_source(self, name, src_dict, free=None, init_source=True,\n save_source_maps=True, use_pylike=True,\n use_single_psf=False, **kwargs):\n \"\"\"Add a source to the ROI model. 
This function may be called\n either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.\n\n Parameters\n ----------\n name : str\n Source name.\n\n src_dict : dict or `~fermipy.roi_model.Source` object\n Dictionary or source object defining the source properties\n (coordinates, spectral parameters, etc.).\n\n free : bool\n Initialize the source with a free normalization parameter.\n\n use_pylike : bool\n Create source maps with pyLikelihood.\n\n use_single_psf : bool \n Use the PSF model calculated for the ROI center. If false\n then a new model will be generated using the position of\n the source.\n\n \"\"\"\n\n if self.roi.has_source(name):\n msg = 'Source %s already exists.' % name\n self.logger.error(msg)\n raise Exception(msg)\n\n loglevel = kwargs.pop('loglevel', self.loglevel)\n\n self.logger.log(loglevel, 'Adding source ' + name)\n\n src = self.roi.create_source(name, src_dict, rescale=True)\n self.make_template(src)\n\n for c in self.components:\n c.add_source(name, src_dict, free=free,\n save_source_maps=save_source_maps,\n use_pylike=use_pylike,\n use_single_psf=use_single_psf)\n\n if self._like is None:\n return\n\n if self.config['gtlike']['edisp'] and src.name not in \\\n self.config['gtlike']['edisp_disable']:\n self.set_edisp_flag(src.name, True)\n\n self.like.syncSrcParams(str(name))\n self.like.model = self.like.components[0].model\n # if free is not None:\n # self.free_norm(name, free, loglevel=logging.DEBUG)\n\n if init_source:\n self._init_source(name)\n self._update_roi()\n\n if self._fitcache is not None:\n self._fitcache.update_source(name)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_sources_from_roi(self, names, roi, free=False, **kwargs):\n\n for name in names:\n self.add_source(name, roi[name].data, free=free, **kwargs)", "response": "Add multiple sources to the current ROI model copied from another ROI model."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the 
following Python 3 code\ndef delete_source(self, name, save_template=True, delete_source_map=False,\n build_fixed_wts=True, **kwargs):\n \"\"\"Delete a source from the ROI model.\n\n Parameters\n ----------\n name : str\n Source name.\n\n save_template : bool\n Keep the SpatialMap FITS template associated with this\n source.\n\n delete_source_map : bool\n Delete the source map associated with this source from the\n source maps file.\n\n Returns\n -------\n src : `~fermipy.roi_model.Model`\n The deleted source object.\n\n \"\"\"\n\n if not self.roi.has_source(name):\n self.logger.error('No source with name: %s', name)\n return\n\n loglevel = kwargs.pop('loglevel', self.loglevel)\n\n self.logger.log(loglevel, 'Deleting source %s', name)\n\n # STs require a source to be freed before deletion\n if self.like is not None:\n self.free_norm(name, loglevel=logging.DEBUG)\n\n for c in self.components:\n c.delete_source(name, save_template=save_template,\n delete_source_map=delete_source_map,\n build_fixed_wts=build_fixed_wts)\n\n src = self.roi.get_source_by_name(name)\n self.roi.delete_sources([src])\n if self.like is not None:\n self.like.model = self.like.components[0].model\n self._update_roi()\n return src", "response": "Delete a source from the ROI model."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ndeleting sources in the ROI model satisfying the given selection criteria.", "response": "def delete_sources(self, cuts=None, distance=None,\n skydir=None, minmax_ts=None, minmax_npred=None,\n exclude=None, square=False, names=None):\n \"\"\"Delete sources in the ROI model satisfying the given\n selection criteria.\n\n Parameters\n ----------\n cuts : dict\n Dictionary of [min,max] selections on source properties.\n\n distance : float\n Cut on angular distance from ``skydir``. If None then no\n selection will be applied.\n\n skydir : `~astropy.coordinates.SkyCoord`\n Reference sky coordinate for ``distance`` selection. 
If\n None then the distance selection will be applied with\n respect to the ROI center.\n\n minmax_ts : list\n Select sources that have TS in the range [min,max]. If\n either min or max are None then only a lower (upper) bound\n will be applied. If this parameter is none no selection\n will be applied.\n\n minmax_npred : list\n Select sources that have npred in the range [min,max]. If\n either min or max are None then only a lower (upper) bound\n will be applied. If this parameter is none no selection\n will be applied.\n\n square : bool\n Switch between applying a circular or square (ROI-like)\n selection on the maximum projected distance from the ROI\n center.\n\n names : list \n Select sources matching a name in this list.\n\n Returns\n -------\n srcs : list\n A list of `~fermipy.roi_model.Model` objects.\n\n \"\"\"\n\n srcs = self.roi.get_sources(skydir=skydir, distance=distance, cuts=cuts,\n minmax_ts=minmax_ts, minmax_npred=minmax_npred,\n exclude=exclude, square=square,\n coordsys=self.config[\n 'binning']['coordsys'],\n names=names)\n\n for s in srcs:\n self.delete_source(s.name, build_fixed_wts=False)\n\n if self.like is not None:\n # Build fixed model weights in one pass\n for c in self.components:\n c.like.logLike.buildFixedModelWts()\n\n self._update_roi()\n\n return srcs"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef free_sources_by_name(self, names, free=True, pars=None,\n **kwargs):\n \"\"\"Free all sources with names matching ``names``.\n\n Parameters\n ----------\n names : list\n List of source names.\n\n free : bool\n Choose whether to free (free=True) or fix (free=False)\n source parameters.\n\n pars : list\n Set a list of parameters to be freed/fixed for each\n source. If none then all source parameters will be\n freed/fixed. If pars='norm' then only normalization\n parameters will be freed.\n\n Returns\n -------\n srcs : list\n A list of `~fermipy.roi_model.Model` objects. 
\n \"\"\"\n\n if names is None:\n return\n\n names = [names] if not isinstance(names, list) else names\n names = [self.roi.get_source_by_name(t).name for t in names]\n srcs = [s for s in self.roi.sources if s.name in names]\n for s in srcs:\n self.free_source(s.name, free=free, pars=pars, **kwargs)\n\n return srcs", "response": "Free all sources with names matching names."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef free_sources(self, free=True, pars=None, cuts=None,\n distance=None, skydir=None, minmax_ts=None, minmax_npred=None,\n exclude=None, square=False, **kwargs):\n \"\"\"Free or fix sources in the ROI model satisfying the given\n selection. When multiple selections are defined, the selected\n sources will be those satisfying the logical AND of all\n selections (e.g. distance < X && minmax_ts[0] < ts <\n minmax_ts[1] && ...).\n\n Parameters\n ----------\n\n free : bool\n Choose whether to free (free=True) or fix (free=False)\n source parameters.\n\n pars : list\n Set a list of parameters to be freed/fixed for each\n source. If none then all source parameters will be\n freed/fixed. If pars='norm' then only normalization\n parameters will be freed.\n\n cuts : dict\n Dictionary of [min,max] selections on source properties.\n\n distance : float\n Cut on angular distance from ``skydir``. If None then no\n selection will be applied.\n\n skydir : `~astropy.coordinates.SkyCoord`\n Reference sky coordinate for ``distance`` selection. If\n None then the distance selection will be applied with\n respect to the ROI center.\n\n minmax_ts : list\n Free sources that have TS in the range [min,max]. If\n either min or max are None then only a lower (upper) bound\n will be applied. If this parameter is none no selection\n will be applied.\n\n minmax_npred : list\n Free sources that have npred in the range [min,max]. If\n either min or max are None then only a lower (upper) bound\n will be applied. 
If this parameter is none no selection\n will be applied.\n\n exclude : list\n Names of sources that will be excluded from the selection.\n\n square : bool\n Switch between applying a circular or square (ROI-like)\n selection on the maximum projected distance from the ROI\n center.\n\n Returns\n -------\n srcs : list\n A list of `~fermipy.roi_model.Model` objects.\n\n \"\"\"\n\n srcs = self.roi.get_sources(skydir=skydir, distance=distance,\n cuts=cuts, minmax_ts=minmax_ts,\n minmax_npred=minmax_npred, exclude=exclude,\n square=square,\n coordsys=self.config['binning']['coordsys'])\n\n for s in srcs:\n self.free_source(s.name, free=free, pars=pars, **kwargs)\n\n return srcs", "response": "Free or fix sources in the ROI model satisfying the given\n selection."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef set_parameter(self, name, par, value, true_value=True, scale=None,\n bounds=None, error=None, update_source=True):\n \"\"\"\n Update the value of a parameter. Parameter bounds will\n automatically be adjusted to encompass the new parameter\n value.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n par : str\n Parameter name.\n\n value : float\n Parameter value. By default this argument should be the\n unscaled (True) parameter value.\n\n scale : float\n Parameter scale (optional). Value argument is interpreted\n with respect to the scale parameter if it is provided.\n\n error : float\n Parameter error (optional). 
By default this argument should be the\n unscaled (True) parameter value.\n\n update_source : bool\n Update the source dictionary for the object.\n\n \"\"\"\n name = self.roi.get_source_by_name(name).name\n idx = self.like.par_index(name, par)\n current_bounds = list(self.like.model[idx].getBounds())\n\n if scale is not None:\n self.like[idx].setScale(scale)\n else:\n scale = self.like.model[idx].getScale()\n\n if true_value:\n current_bounds[0] = min(current_bounds[0], value / scale)\n current_bounds[1] = max(current_bounds[1], value / scale)\n if error is not None:\n error = error / scale\n else:\n current_bounds[0] = min(current_bounds[0], value)\n current_bounds[1] = max(current_bounds[1], value)\n\n # update current bounds to encompass new value\n self.like[idx].setBounds(*current_bounds)\n\n if true_value:\n for p in self.like[idx].pars:\n p.setTrueValue(value)\n else:\n self.like[idx].setValue(value)\n\n if bounds is not None:\n if true_value:\n bounds[0] = min(bounds[0], value / scale)\n bounds[1] = max(bounds[1], value / scale)\n else:\n bounds[0] = min(bounds[0], value)\n bounds[1] = max(bounds[1], value)\n\n # For some reason the numerical accuracy is causing this to throw exceptions.\n try:\n if bounds is not None:\n self.like[idx].setBounds(*bounds)\n except RuntimeError:\n self.logger.warning(\n \"Caught failure on setBounds for %s::%s.\" % (name, par))\n pass\n\n if error is not None:\n self.like[idx].setError(error)\n\n self._sync_params(name)\n\n if update_source:\n self.update_source(name)", "response": "Set the value of a parameter in the source dictionary."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef set_parameter_scale(self, name, par, scale):\n name = self.roi.get_source_by_name(name).name\n idx = self.like.par_index(name, par)\n current_bounds = list(self.like.model[idx].getBounds())\n current_scale = self.like.model[idx].getScale()\n current_value = self.like[idx].getValue()\n\n 
self.like[idx].setScale(scale)\n self.like[idx].setValue(current_value * current_scale / scale)\n self.like[idx].setBounds(current_bounds[0] * current_scale / scale,\n current_bounds[1] * current_scale / scale)\n self._sync_params(name)", "response": "Update the scale of a parameter while keeping its value constant."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_parameter_bounds(self, name, par, bounds):\n idx = self.like.par_index(name, par)\n self.like[idx].setBounds(*bounds)\n self._sync_params(name)", "response": "Set the bounds on the scaled value of a parameter."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsets the error on the value of a parameter.", "response": "def set_parameter_error(self, name, par, error):\n \"\"\"Set the error on the value of a parameter.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n par : str\n Parameter name.\n\n error : float\n The value for the parameter error\n \"\"\"\n idx = self.like.par_index(name, par)\n self.like[idx].setError(error)\n self._sync_params(name)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nlocking the specified parameter in the specified source.", "response": "def lock_parameter(self, name, par, lock=True):\n \"\"\"Set parameter to locked/unlocked state. 
A locked parameter\n will be ignored when running methods that free/fix sources or\n parameters.\n\n Parameters\n ----------\n name : str\n Source name.\n\n par : str\n Parameter name.\n\n lock : bool \n Set parameter to locked (True) or unlocked (False) state.\n \"\"\"\n name = self.roi.get_source_by_name(name).name\n lck_params = self._lck_params.setdefault(name, [])\n\n if lock:\n self.free_parameter(name, par, False)\n if not par in lck_params:\n lck_params += [par]\n else:\n if par in lck_params:\n lck_params.remove(par)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef free_parameter(self, name, par, free=True):\n name = self.get_source_name(name)\n if par in self._lck_params.get(name, []):\n return\n idx = self.like.par_index(name, par)\n self.like[idx].setFree(free)\n self._sync_params(name)", "response": "Free a parameter of a source by name."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef lock_source(self, name, lock=True):\n name = self.get_source_name(name)\n if lock:\n par_names = self.get_source_params(name)\n self.free_source(name, False, pars=par_names)\n self._lck_params[name] = par_names\n else:\n self._lck_params[name] = []", "response": "Locks the source with the specified name."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef free_source(self, name, free=True, pars=None, **kwargs):\n\n free_pars = self.get_free_param_vector()\n\n loglevel = kwargs.pop('loglevel', self.loglevel)\n\n # Find the source\n src = self.roi.get_source_by_name(name)\n name = src.name\n\n if pars is None or (isinstance(pars, list) and not pars):\n pars = []\n pars += norm_parameters.get(src['SpectrumType'], [])\n pars += shape_parameters.get(src['SpectrumType'], [])\n elif pars == 'norm':\n pars = []\n pars += norm_parameters.get(src['SpectrumType'], [])\n elif pars == 'shape':\n pars = []\n pars += 
shape_parameters.get(src['SpectrumType'], [])\n elif isinstance(pars, list):\n pass\n else:\n raise Exception('Invalid parameter list.')\n\n # Remove locked parameters\n lck_params = self._lck_params.get(name, [])\n pars = [p for p in pars if p not in lck_params]\n\n # Deduce here the names of all parameters from the spectral type\n src_par_names = pyLike.StringVector()\n self.like[name].src.spectrum().getParamNames(src_par_names)\n\n par_indices = []\n par_names = []\n for p in src_par_names:\n if pars is not None and p not in pars:\n continue\n\n idx = self.like.par_index(name, p)\n if free == free_pars[idx]:\n continue\n\n par_indices.append(idx)\n par_names.append(p)\n\n if len(par_names) == 0:\n return\n\n if free:\n self.logger.log(loglevel, 'Freeing parameters for %-22s: %s',\n name, par_names)\n else:\n self.logger.log(loglevel, 'Fixing parameters for %-22s: %s',\n name, par_names)\n\n for (idx, par_name) in zip(par_indices, par_names):\n self.like[idx].setFree(free)\n self._sync_params_state(name)", "response": "Free a source and its parameters."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nfree normalization of a source.", "response": "def free_norm(self, name, free=True, **kwargs):\n \"\"\"Free/Fix normalization of a source.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n free : bool\n Choose whether to free (free=True) or fix (free=False).\n\n \"\"\"\n name = self.get_source_name(name)\n normPar = self.like.normPar(name).getName()\n self.free_source(name, pars=[normPar], free=free, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfrees index of a source.", "response": "def free_index(self, name, free=True, **kwargs):\n \"\"\"Free/Fix index of a source.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n free : bool\n Choose whether to free (free=True) or fix (free=False).\n\n \"\"\"\n src = self.roi.get_source_by_name(name)\n 
self.free_source(name, free=free,\n pars=index_parameters.get(src['SpectrumType'], []),\n **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nfree shape parameters of a source.", "response": "def free_shape(self, name, free=True, **kwargs):\n \"\"\"Free/Fix shape parameters of a source.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n free : bool\n Choose whether to free (free=True) or fix (free=False).\n \"\"\"\n src = self.roi.get_source_by_name(name)\n self.free_source(name, free=free,\n pars=shape_parameters[src['SpectrumType']],\n **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_source_name(self, name):\n if name not in self.like.sourceNames():\n name = self.roi.get_source_by_name(name).name\n return name", "response": "Return the name of a source as it is defined in the\n pyLikelihood model object."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef optimize(self, **kwargs):\n\n loglevel = kwargs.pop('loglevel', self.loglevel)\n timer = Timer.create(start=True)\n self.logger.log(loglevel, 'Starting')\n\n loglike0 = -self.like()\n self.logger.debug('LogLike: %f' % loglike0)\n\n # Extract options from kwargs\n config = copy.deepcopy(self.config['roiopt'])\n config['optimizer'] = copy.deepcopy(self.config['optimizer'])\n fermipy.config.validate_config(kwargs, config)\n config = merge_dict(config, kwargs)\n\n # Extract options from kwargs\n npred_frac_threshold = config['npred_frac']\n npred_threshold = config['npred_threshold']\n shape_ts_threshold = config['shape_ts_threshold']\n max_free_sources = config['max_free_sources']\n skip = copy.deepcopy(config['skip'])\n\n o = defaults.make_default_dict(defaults.roiopt_output)\n o['config'] = config\n o['loglike0'] = loglike0\n\n # preserve free parameters\n free = self.get_free_param_vector()\n\n # Fix all parameters\n 
self.free_sources(free=False, loglevel=logging.DEBUG)\n\n # Free norms of sources for which the sum of npred is a\n # fraction > npred_frac of the total model counts in the ROI\n npred_sum = 0\n skip_sources = skip if skip != None else []\n joint_norm_fit = []\n\n # EAC, we need this try block by older version of the ST don't have has_weights function\n try:\n if self.like.logLike.has_weights():\n npred_str = 'npred_wt'\n else:\n npred_str = 'npred'\n except AttributeError:\n npred_str = 'npred'\n\n # FIXME, EAC, use npred_wt here\n for s in sorted(self.roi.sources, key=lambda t: t[npred_str],\n reverse=True):\n\n npred_sum += s[npred_str]\n npred_frac = npred_sum / self._roi_data[npred_str]\n\n if s.name in skip_sources:\n continue\n\n self.free_norm(s.name, loglevel=logging.DEBUG)\n joint_norm_fit.append(s.name)\n\n if npred_frac > npred_frac_threshold:\n break\n if s[npred_str] < npred_threshold:\n break\n if len(joint_norm_fit) >= max_free_sources:\n break\n\n print (\"Joint fit \", joint_norm_fit)\n self.fit(loglevel=logging.DEBUG, **config['optimizer'])\n self.free_sources(free=False, loglevel=logging.DEBUG)\n\n # Step through remaining sources and re-fit normalizations\n # FIXME, EAC, use npred_wt here\n for s in sorted(self.roi.sources, key=lambda t: t[npred_str],\n reverse=True):\n\n if s.name in skip_sources or s.name in joint_norm_fit:\n continue\n\n if s[npred_str] < npred_threshold:\n self.logger.debug(\n 'Skipping %s with npred %10.3f', s.name, s[npred_str])\n continue\n\n self.logger.debug('Fitting %s npred: %10.3f TS: %10.3f',\n s.name, s[npred_str], s['ts'])\n self.free_norm(s.name, loglevel=logging.DEBUG)\n self.fit(loglevel=logging.DEBUG, **config['optimizer'])\n self.logger.debug('Post-fit Results npred: %10.3f TS: %10.3f',\n s[npred_str], s['ts'])\n self.free_norm(s.name, free=False, loglevel=logging.DEBUG)\n\n # Refit spectral shape parameters for sources with TS >\n # shape_ts_threshold\n for s in sorted(self.roi.sources,\n key=lambda t: 
t['ts'] if np.isfinite(t['ts']) else 0,\n reverse=True):\n\n if s.name in skip_sources:\n continue\n\n if s['ts'] < shape_ts_threshold or not np.isfinite(s['ts']):\n continue\n\n print ('Fitting shape %s TS: %10.3f' % (s.name, s['ts']))\n self.logger.debug('Fitting shape %s TS: %10.3f', s.name, s['ts'])\n self.free_source(s.name, loglevel=logging.DEBUG)\n self.fit(loglevel=logging.DEBUG, **config['optimizer'])\n self.free_source(s.name, free=False, loglevel=logging.DEBUG)\n\n self.set_free_param_vector(free)\n\n loglike1 = -self.like()\n\n o['loglike1'] = loglike1\n o['dloglike'] = loglike1 - loglike0\n\n self.logger.log(loglevel, 'Finished')\n self.logger.log(loglevel, 'LogLike: %f Delta-LogLike: %f',\n loglike1, loglike1 - loglike0)\n self.logger.log(loglevel, 'Execution time: %.2f s', timer.elapsed_time)\n return o", "response": "Iteratively optimize the ROI model."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nprofile the normalization of a source.", "response": "def profile_norm(self, name, logemin=None, logemax=None, reoptimize=False,\n xvals=None, npts=None, fix_shape=True, savestate=True,\n **kwargs):\n \"\"\"Profile the normalization of a source.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n reoptimize : bool\n Re-optimize free parameters in the model at each point\n in the profile likelihood scan.\n\n \"\"\"\n\n self.logger.debug('Profiling %s', name)\n\n if savestate:\n saved_state = LikelihoodState(self.like)\n\n if fix_shape:\n self.free_sources(False, pars='shape', loglevel=logging.DEBUG)\n\n if npts is None:\n npts = self.config['gtlike']['llscan_npts']\n\n # Find the source\n name = self.roi.get_source_by_name(name).name\n parName = self.like.normPar(name).getName()\n\n loge_bounds = self.loge_bounds\n if logemin is not None or logemax is not None:\n self.set_energy_range(logemin, logemax)\n\n # Find a sequence of values for the normalization scan\n if xvals is None:\n if reoptimize:\n xvals = 
self._find_scan_pts_reopt(name, npts=npts,\n **kwargs)\n else:\n xvals = self._find_scan_pts(name, npts=9)\n lnlp = self.profile(name, parName,\n reoptimize=False, xvals=xvals)\n lims = utils.get_parameter_limits(lnlp['xvals'],\n lnlp['dloglike'],\n cl_limit=0.99)\n\n if not np.isfinite(lims['ul']):\n self.logger.warning('Upper limit not found. '\n 'Refitting normalization.')\n self.like.optimize(0)\n xvals = self._find_scan_pts(name, npts=npts)\n lnlp = self.profile(name, parName,\n reoptimize=False,\n xvals=xvals)\n lims = utils.get_parameter_limits(lnlp['xvals'],\n lnlp['dloglike'],\n cl_limit=0.99)\n\n if np.isfinite(lims['ll']):\n xhi = np.linspace(lims['x0'], lims['ul'], npts - npts // 2)\n xlo = np.linspace(lims['ll'], lims['x0'], npts // 2)\n xvals = np.concatenate((xlo[:-1], xhi))\n xvals = np.insert(xvals, 0, 0.0)\n elif np.abs(lnlp['dloglike'][0] - lims['lnlmax']) > 0.1:\n lims['ll'] = 0.0\n xhi = np.linspace(lims['x0'], lims['ul'],\n (npts + 1) - (npts + 1) // 2)\n xlo = np.linspace(lims['ll'], lims['x0'], (npts + 1) // 2)\n xvals = np.concatenate((xlo[:-1], xhi))\n else:\n xvals = np.linspace(0, lims['ul'], npts)\n\n o = self.profile(name, parName,\n reoptimize=reoptimize, xvals=xvals,\n savestate=savestate, **kwargs)\n\n if savestate:\n saved_state.restore()\n\n if logemin is not None or logemax is not None:\n self.set_energy_range(*loge_bounds)\n\n self.logger.debug('Finished')\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef profile(self, name, parName, logemin=None, logemax=None,\n reoptimize=False,\n xvals=None, npts=None, savestate=True, **kwargs):\n \"\"\"Profile the likelihood for the given source and parameter.\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n parName : str\n Parameter name.\n\n reoptimize : bool\n Re-fit nuisance parameters at each step in the scan. 
Note\n that enabling this option will only re-fit parameters that\n were free when the method was executed.\n\n Returns\n -------\n\n lnlprofile : dict\n Dictionary containing results of likelihood scan.\n\n \"\"\"\n # Find the source\n name = self.roi.get_source_by_name(name).name\n\n par = self.like.normPar(name)\n parName = self.like.normPar(name).getName()\n idx = self.like.par_index(name, parName)\n bounds = self.like.model[idx].getBounds()\n value = self.like.model[idx].getValue()\n loge_bounds = self.loge_bounds\n\n optimizer = kwargs.get('optimizer', self.config['optimizer'])\n\n if savestate:\n saved_state = self._latch_state()\n\n # If parameter is fixed temporarily free it\n par.setFree(True)\n if optimizer['optimizer'] == 'NEWTON':\n self._create_fitcache()\n\n if logemin is not None or logemax is not None:\n loge_bounds = self.set_energy_range(logemin, logemax)\n else:\n loge_bounds = self.loge_bounds\n\n loglike0 = -self.like()\n\n if xvals is None:\n\n err = par.error()\n val = par.getValue()\n if err <= 0 or val <= 3 * err:\n xvals = 10 ** np.linspace(-2.0, 2.0, 51)\n if val < xvals[0]:\n xvals = np.insert(xvals, val, 0)\n else:\n xvals = np.linspace(0, 1, 25)\n xvals = np.concatenate((-1.0 * xvals[1:][::-1], xvals))\n xvals = val * 10 ** xvals\n\n if np.isnan(xvals).any():\n raise RuntimeError(\n \"Parameter scan points for %s::%s include infinite value.\" % (name, parName))\n\n # Update parameter bounds to encompass scan range\n try:\n self.like[idx].setBounds(min(min(xvals), value, bounds[0]),\n max(max(xvals), value, bounds[1]))\n except RuntimeError:\n self.logger.warning(\n \"Caught failure on setBounds for %s::%s.\" % (name, parName))\n\n o = {'xvals': xvals,\n 'npred': np.zeros(len(xvals)),\n 'npred_wt': np.zeros(len(xvals)),\n 'dnde': np.zeros(len(xvals)),\n 'flux': np.zeros(len(xvals)),\n 'eflux': np.zeros(len(xvals)),\n 'dloglike': np.zeros(len(xvals)),\n 'loglike': np.zeros(len(xvals))\n }\n\n if reoptimize and 
hasattr(self.like.components[0].logLike,\n 'setUpdateFixedWeights'):\n\n for c in self.components:\n c.like.logLike.setUpdateFixedWeights(False)\n\n for i, x in enumerate(xvals):\n\n try:\n self.like[idx] = x\n except RuntimeError:\n self.logger.warning(\n \"Caught failure on set for %s::%s: %.2f\" % (name, parName, x))\n\n if self.like.nFreeParams() > 1 and reoptimize:\n # Only reoptimize if not all frozen\n self.like.freeze(idx)\n fit_output = self._fit(errors=False, **optimizer)\n loglike1 = fit_output['loglike']\n self.like.thaw(idx)\n else:\n loglike1 = -self.like()\n\n flux = self.like[name].flux(10 ** loge_bounds[0],\n 10 ** loge_bounds[1])\n eflux = self.like[name].energyFlux(10 ** loge_bounds[0],\n 10 ** loge_bounds[1])\n prefactor = self.like[idx]\n\n o['dloglike'][i] = loglike1 - loglike0\n o['loglike'][i] = loglike1\n o['dnde'][i] = prefactor.getTrueValue()\n o['flux'][i] = flux\n o['eflux'][i] = eflux\n\n cs = self.model_counts_spectrum(name,\n loge_bounds[0],\n loge_bounds[1], summed=True)\n o['npred'][i] += np.sum(cs)\n cs_wt = self.model_counts_spectrum(name,\n loge_bounds[0],\n loge_bounds[1], summed=True, weighted=True)\n o['npred_wt'][i] += np.sum(cs_wt)\n\n self.like[idx] = value\n\n if reoptimize and hasattr(self.like.components[0].logLike,\n 'setUpdateFixedWeights'):\n\n for c in self.components:\n c.like.logLike.setUpdateFixedWeights(True)\n\n # Restore model parameters to original values\n if savestate:\n saved_state.restore()\n\n self.like[idx].setBounds(*bounds)\n if logemin is not None or logemax is not None:\n self.set_energy_range(*loge_bounds)\n\n return o", "response": "Profile the likelihood for the given source and parameter."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nconstrain the normalizations of one or more sources by adding gaussian priors with sigma equal to the parameter error times a scaling factor.", "response": "def constrain_norms(self, srcNames, cov_scale=1.0):\n 
\"\"\"Constrain the normalizations of one or more sources by\n adding gaussian priors with sigma equal to the parameter\n error times a scaling factor.\"\"\"\n\n # Get the covariance matrix\n\n for name in srcNames:\n par = self.like.normPar(name)\n\n err = par.error()\n val = par.getValue()\n\n if par.error() == 0.0 or not par.isFree():\n continue\n\n self.add_gauss_prior(name, par.getName(),\n val, err * cov_scale)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncreates an object of type MINUIT or NewMinuit type optimizer object", "response": "def _create_optObject(self, **kwargs):\n \"\"\" Make MINUIT or NewMinuit type optimizer object \"\"\"\n\n optimizer = kwargs.get('optimizer',\n self.config['optimizer']['optimizer'])\n\n if optimizer.upper() == 'MINUIT':\n optObject = pyLike.Minuit(self.like.logLike)\n elif optimizer.upper() == 'NEWMINUIT':\n optObject = pyLike.NewMinuit(self.like.logLike)\n else:\n optFactory = pyLike.OptimizerFactory_instance()\n optObject = optFactory.create(str(optimizer), self.like.logLike)\n return optObject"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nexecutes a fit of all sources with the specified parameters.", "response": "def fit(self, update=True, **kwargs):\n \"\"\"Run the likelihood optimization. This will execute a fit of all\n parameters that are currently free in the model and update the\n charateristics of the corresponding model components (TS,\n npred, etc.). 
The fit will be repeated N times (set with the\n `retries` parameter) until a fit quality greater than or equal\n to `min_fit_quality` and a fit status code of 0 is obtained.\n If the fit does not succeed after N retries then all parameter\n values will be reverted to their state prior to the execution\n of the fit.\n\n Parameters\n ----------\n\n update : bool\n Update the model dictionary for all sources with free\n parameters.\n\n tol : float\n Set the optimizer tolerance.\n\n verbosity : int\n Set the optimizer output level.\n\n optimizer : str\n Set the likelihood optimizer (e.g. MINUIT or NEWMINUIT).\n\n retries : int\n Set the number of times to rerun the fit when the fit quality\n is < 3.\n\n min_fit_quality : int\n Set the minimum fit quality. If the fit quality is smaller\n than this value then all model parameters will be\n restored to their values prior to the fit.\n\n reoptimize : bool\n Refit background sources when updating source properties\n (TS and likelihood profiles).\n\n Returns\n -------\n\n fit : dict\n Dictionary containing diagnostic information from the fit\n (fit quality, parameter covariances, etc.).\n\n \"\"\"\n\n loglevel = kwargs.pop('loglevel', self.loglevel)\n self.logger.log(loglevel, \"Starting fit.\")\n\n # Extract options from kwargs\n config = copy.deepcopy(self.config['optimizer'])\n config.setdefault('covar', True)\n config.setdefault('reoptimize', False)\n config = utils.merge_dict(config, kwargs)\n\n num_free = self.like.nFreeParams()\n\n loglike0 = -self.like()\n\n # Initialize output dict\n o = {'fit_quality': 3,\n 'fit_status': 0,\n 'fit_success': True,\n 'dloglike': 0.0,\n 'edm': 0.0,\n 'loglike': loglike0,\n 'covariance': None,\n 'correlation': None,\n 'values': np.ones(num_free) * np.nan,\n 'errors': np.ones(num_free) * np.nan,\n 'indices': np.zeros(num_free, dtype=int),\n 'is_norm': np.empty(num_free, dtype=bool),\n 'src_names': num_free * [None],\n 'par_names': num_free * [None],\n 'config': config\n }\n\n if not 
num_free:\n self.logger.log(loglevel, \"Skipping fit. No free parameters.\")\n return o\n\n saved_state = LikelihoodState(self.like)\n\n fit_output = self._fit(**config)\n o.update(fit_output)\n\n self.logger.debug(\"Fit complete.\")\n o['dloglike'] = o['loglike'] - loglike0\n\n if not o['fit_success']:\n self.logger.error('%s failed with status code %i fit quality %i',\n config['optimizer'], o['fit_status'],\n o['fit_quality'])\n saved_state.restore()\n return o\n\n if update:\n\n free_params = self.get_params(True)\n self._extract_correlation(o, free_params)\n for name in self.like.sourceNames():\n freePars = self.get_free_source_params(name)\n if len(freePars) == 0:\n continue\n self.update_source(name, reoptimize=config['reoptimize'])\n\n # Update roi model counts\n self._update_roi()\n\n self.logger.log(loglevel, \"Fit returned successfully. \" +\n \"Quality: %3i Status: %3i\",\n o['fit_quality'], o['fit_status'])\n self.logger.log(loglevel, \"LogLike: %12.3f DeltaLogLike: %12.3f \",\n o['loglike'], o['dloglike'])\n return o"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _fit_newton(self, fitcache=None, ebin=None, **kwargs):\n\n tol = kwargs.get('tol', self.config['optimizer']['tol'])\n max_iter = kwargs.get('max_iter',\n self.config['optimizer']['max_iter'])\n init_lambda = kwargs.get('init_lambda',\n self.config['optimizer']['init_lambda'])\n use_reduced = kwargs.get('use_reduced', True)\n\n free_params = self.get_params(True)\n free_norm_params = [p for p in free_params if p['is_norm'] is True]\n\n if len(free_params) != len(free_norm_params):\n msg = 'Executing Newton fitter with one ' + \\\n 'or more free shape parameters.'\n self.logger.error(msg)\n raise Exception(msg)\n\n verbosity = kwargs.get('verbosity', 0)\n if fitcache is None:\n fitcache = self._create_fitcache(**kwargs)\n\n fitcache.update(self.get_params(),\n tol, max_iter, init_lambda, use_reduced)\n\n logemin = 
self.loge_bounds[0]\n logemax = self.loge_bounds[1]\n imin = int(utils.val_to_edge(self.log_energies, logemin)[0])\n imax = int(utils.val_to_edge(self.log_energies, logemax)[0])\n\n if ebin is not None:\n fitcache.fitcache.setEnergyBin(ebin)\n elif imin == 0 and imax == self.enumbins:\n fitcache.fitcache.setEnergyBin(-1)\n else:\n fitcache.fitcache.setEnergyBins(imin, imax)\n\n num_free = len(free_norm_params)\n\n o = {'fit_status': 0,\n 'fit_quality': 3,\n 'fit_success': True,\n 'edm': 0,\n 'loglike': None,\n 'values': np.ones(num_free) * np.nan,\n 'errors': np.ones(num_free) * np.nan,\n 'indices': np.zeros(num_free, dtype=int),\n 'is_norm': np.empty(num_free, dtype=bool),\n 'src_names': num_free * [None],\n 'par_names': num_free * [None],\n }\n\n if num_free == 0:\n return o\n\n ref_vals = np.array(fitcache.fitcache.refValues())\n free = np.array(fitcache.fitcache.currentFree())\n norm_vals = ref_vals[free]\n\n norm_idxs = []\n for i, p in enumerate(free_norm_params):\n\n norm_idxs += [p['idx']]\n o['indices'][i] = p['idx']\n o['src_names'][i] = p['src_name']\n o['par_names'][i] = p['par_name']\n o['is_norm'][i] = p['is_norm']\n\n o['fit_status'] = fitcache.fit(verbose=verbosity)\n o['edm'] = fitcache.fitcache.currentEDM()\n\n pars, errs, cov = fitcache.get_pars()\n pars *= norm_vals\n errs *= norm_vals\n cov = cov * np.outer(norm_vals, norm_vals)\n\n o['values'] = pars\n o['errors'] = errs\n o['covariance'] = cov\n errinv = np.zeros_like(o['errors'])\n m = o['errors'] > 0\n errinv[m] = 1. / o['errors'][m]\n o['correlation'] = o['covariance'] * np.outer(errinv, errinv)\n\n if o['fit_status'] in [-2, 0]:\n for idx, val, err in zip(norm_idxs, pars, errs):\n self._set_value_bounded(idx, val)\n self.like[idx].setError(err)\n self.like.syncSrcParams()\n o['fit_success'] = True\n else:\n o['fit_success'] = False\n\n if o['fit_status']:\n self.logger.error('Error in NEWTON fit. 
Fit Status: %i',\n o['fit_status'])\n\n # FIXME: Figure out why currentLogLike gets out of sync\n #loglike = fitcache.fitcache.currentLogLike()\n #prior_vals, prior_errs, has_prior = gtutils.get_priors(self.like)\n #loglike -= np.sum(has_prior) * np.log(np.sqrt(2 * np.pi))\n loglike = -self.like()\n o['loglike'] = loglike\n\n return o", "response": "Fast fitting method using Newton fitter."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef load_xml(self, xmlfile):\n\n self.logger.info('Loading XML')\n\n for c in self.components:\n c.load_xml(xmlfile)\n\n for name in self.like.sourceNames():\n self.update_source(name)\n\n self._fitcache = None\n\n self.logger.info('Finished Loading XML')", "response": "Load model definition from XML file."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef load_parameters_from_yaml(self, yamlfile, update_sources=False):\n d = utils.load_yaml(yamlfile)\n for src, src_pars in d.items():\n for par_name, par_dict in src_pars.items():\n if par_name in ['SpectrumType']:\n continue\n par_value = par_dict.get('value', None)\n par_error = par_dict.get('error', None)\n par_scale = par_dict.get('scale', None)\n par_min = par_dict.get('min', None)\n par_max = par_dict.get('max', None)\n par_free = par_dict.get('free', None)\n if par_min is not None and par_max is not None:\n par_bounds = [par_min, par_max]\n else:\n par_bounds = None\n try:\n self.set_parameter(src, par_name, par_value, true_value=False,\n scale=par_scale, bounds=par_bounds, error=par_error,\n update_source=update_sources)\n except RuntimeError as msg:\n self.logger.warn(msg)\n self.logger.warn(\"Did not set parameter %s:%s\"%(src,par_name)) \n continue\n except Exception as msg:\n self.logger.warn(msg)\n continue\n if par_free is not None:\n self.free_parameter(src, par_name, par_free)\n self._sync_params_state()", "response": "Load model parameters from a yaml file."} 
{"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _restore_counts_maps(self):\n\n for c in self.components:\n c.restore_counts_maps()\n\n if hasattr(self.like.components[0].logLike, 'setCountsMap'):\n self._init_roi_model()\n else:\n self.write_xml('tmp')\n self._like = SummedLikelihood()\n for i, c in enumerate(self._components):\n c._create_binned_analysis()\n self._like.addComponent(c.like)\n self._init_roi_model()\n self.load_xml('tmp')", "response": "Revert counts maps to their state prior to injecting any simulated\n components."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef simulate_source(self, src_dict=None):\n\n self._fitcache = None\n\n if src_dict is None:\n src_dict = {}\n else:\n src_dict = copy.deepcopy(src_dict)\n\n skydir = wcs_utils.get_target_skydir(src_dict, self.roi.skydir)\n\n src_dict.setdefault('ra', skydir.ra.deg)\n src_dict.setdefault('dec', skydir.dec.deg)\n src_dict.setdefault('SpatialModel', 'PointSource')\n src_dict.setdefault('SpatialWidth', 0.3)\n src_dict.setdefault('Index', 2.0)\n src_dict.setdefault('Prefactor', 1E-13)\n\n self.add_source('mcsource', src_dict, free=True,\n init_source=False)\n for c in self.components:\n c.simulate_roi('mcsource', clear=False)\n\n self.delete_source('mcsource')\n\n if hasattr(self.like.components[0].logLike, 'setCountsMap'):\n self._init_roi_model()\n else:\n self.write_xml('tmp')\n self._like = SummedLikelihood()\n for i, c in enumerate(self._components):\n c._create_binned_analysis('tmp.xml')\n self._like.addComponent(c.like)\n self._init_roi_model()\n self.load_xml('tmp')", "response": "Simulate a source and inject the simulated source counts into the data."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsimulates the current model and replace the data counts cube with the simulation of the ROI.", "response": "def simulate_roi(self, 
name=None, randomize=True, restore=False):\n \"\"\"Generate a simulation of the ROI using the current best-fit model\n and replace the data counts cube with this simulation. The\n simulation is created by generating an array of Poisson random\n numbers with expectation values drawn from the model cube of\n the binned analysis instance. This function will update the\n counts cube both in memory and in the source map file. The\n counts cube can be restored to its original state by calling\n this method with ``restore`` = True.\n\n Parameters\n ----------\n name : str\n Name of the model component to be simulated. If None then\n the whole ROI will be simulated.\n\n restore : bool\n Restore the data counts cube to its original state.\n \"\"\"\n\n self.logger.info('Simulating ROI')\n\n self._fitcache = None\n\n if restore:\n self.logger.info('Restoring')\n self._restore_counts_maps()\n self.logger.info('Finished')\n return\n\n for c in self.components:\n c.simulate_roi(name=name, clear=True, randomize=randomize)\n\n if hasattr(self.like.components[0].logLike, 'setCountsMap'):\n self._init_roi_model()\n else:\n self.write_xml('tmp')\n self._like = SummedLikelihood()\n for i, c in enumerate(self._components):\n c._create_binned_analysis('tmp.xml')\n self._like.addComponent(c.like)\n self._init_roi_model()\n self.load_xml('tmp')\n\n self.logger.info('Finished')"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef write_model_map(self, model_name, name=None):\n maps = [c.write_model_map(model_name, name) for c in self.components]\n\n outfile = os.path.join(self.workdir,\n 'mcube_%s.fits' % (model_name))\n\n mmap = Map.from_geom(self.geom)\n for m in maps:\n mmap.coadd(m)\n mmap.write(outfile, overwrite=True, conv='fgst-ccube')\n return [mmap] + maps", "response": "Save the counts model map to a FITS file."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef 
write_weight_map(self, model_name):\n maps = [c.write_weight_map(model_name) for c in self.components]\n\n outfile = os.path.join(self.workdir,\n 'wcube_%s.fits' % (model_name))\n\n wmap = Map.from_geom(self.geom)\n # FIXME: Should we average weights maps rather than coadding?\n for m in maps:\n wmap.coadd(m)\n wmap.write(outfile, overwrite=True, conv='fgst-ccube')\n return [wmap] + maps", "response": "Save the counts model map to a FITS file."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nprints information about the spectral and spatial properties of the ROI.", "response": "def print_roi(self, loglevel=logging.INFO):\n \"\"\"Print information about the spectral and spatial properties\n of the ROI (sources, diffuse components).\"\"\"\n self.logger.log(loglevel, '\\n' + str(self.roi))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef print_params(self, allpars=False, loglevel=logging.INFO):\n\n pars = self.get_params()\n\n o = '\\n'\n o += '%4s %-20s%10s%10s%10s%10s%10s%5s\\n' % (\n 'idx', 'parname', 'value', 'error',\n 'min', 'max', 'scale', 'free')\n\n o += '-' * 80 + '\\n'\n\n src_pars = collections.OrderedDict()\n for p in pars:\n\n src_pars.setdefault(p['src_name'], [])\n src_pars[p['src_name']] += [p]\n\n free_sources = []\n for k, v in src_pars.items():\n\n for p in v:\n if not p['free']:\n continue\n\n free_sources += [k]\n\n for k, v in src_pars.items():\n\n if not allpars and k not in free_sources:\n continue\n\n o += '%s\\n' % k\n for p in v:\n\n o += '%4i %-20.19s' % (p['idx'], p['par_name'])\n o += '%10.3g%10.3g' % (p['value'], p['error'])\n o += '%10.3g%10.3g%10.3g' % (p['min'], p['max'],\n p['scale'])\n\n if p['free']:\n o += ' *'\n else:\n o += ' '\n\n o += '\\n'\n\n self.logger.log(loglevel, o)", "response": "Print information about the model parameters."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function 
to\nwrite current state of the analysis to a file. This method writes an XML model definition, a ROI dictionary, and a FITS source catalog file. A previously saved analysis state can be reloaded from the ROI dictionary file with the `~fermipy.gtanalysis.GTAnalysis.load_roi` method. Parameters ---------- outfile : str String prefix of the output files. The extension of this string will be stripped when generating the XML, YAML and npy filenames. make_plots : bool Generate diagnostic plots. save_model_map : bool Save the current counts model to a FITS file.", "response": "def write_roi(self, outfile=None,\n save_model_map=False, **kwargs):\n \"\"\"Write current state of the analysis to a file. This method\n writes an XML model definition, a ROI dictionary, and a FITS\n source catalog file. A previously saved analysis state can be\n reloaded from the ROI dictionary file with the\n `~fermipy.gtanalysis.GTAnalysis.load_roi` method.\n\n Parameters\n ----------\n\n outfile : str\n String prefix of the output files. 
The extension of this\n string will be stripped when generating the XML, YAML and\n npy filenames.\n\n make_plots : bool\n Generate diagnostic plots.\n\n save_model_map : bool\n Save the current counts model to a FITS file.\n\n \"\"\"\n # extract the results in a convenient format\n\n make_plots = kwargs.get('make_plots', False)\n save_weight_map = kwargs.get('save_weight_map', False)\n\n if outfile is None:\n pathprefix = os.path.join(self.config['fileio']['workdir'],\n 'results')\n elif not os.path.isabs(outfile):\n pathprefix = os.path.join(self.config['fileio']['workdir'],\n outfile)\n else:\n pathprefix = outfile\n\n pathprefix = utils.strip_suffix(pathprefix,\n ['fits', 'yaml', 'npy'])\n# pathprefix, ext = os.path.splitext(pathprefix)\n prefix = os.path.basename(pathprefix)\n\n xmlfile = pathprefix + '.xml'\n fitsfile = pathprefix + '.fits'\n npyfile = pathprefix + '.npy'\n\n self.write_xml(xmlfile)\n self.write_fits(fitsfile)\n\n if not self.config['gtlike']['use_external_srcmap']:\n for c in self.components:\n c.like.logLike.saveSourceMaps(str(c.files['srcmap']))\n\n if save_model_map:\n self.write_model_map(prefix)\n\n if save_weight_map:\n self.write_weight_map(prefix)\n\n o = {}\n o['roi'] = copy.deepcopy(self._roi_data)\n o['config'] = copy.deepcopy(self.config)\n o['version'] = fermipy.__version__\n o['stversion'] = fermipy.get_st_version()\n o['sources'] = {}\n\n for s in self.roi.sources:\n o['sources'][s.name] = copy.deepcopy(s.data)\n\n for i, c in enumerate(self.components):\n o['roi']['components'][i][\n 'src_expscale'] = copy.deepcopy(c.src_expscale)\n\n self.logger.info('Writing %s...', npyfile)\n np.save(npyfile, o)\n\n if make_plots:\n self.make_plots(prefix, None,\n **kwargs.get('plotting', {}))"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nmake diagnostic plots using the current ROI model.", "response": "def make_plots(self, prefix, mcube_map=None, **kwargs):\n \"\"\"Make diagnostic plots using 
the current ROI model.\"\"\"\n\n #mcube_maps = kwargs.pop('mcube_maps', None)\n if mcube_map is None:\n mcube_map = self.model_counts_map()\n\n plotter = plotting.AnalysisPlotter(self.config['plotting'],\n fileio=self.config['fileio'],\n logging=self.config['logging'])\n plotter.run(self, mcube_map, prefix=prefix, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ntest whether a source shows spectral curvature by comparing the likelihood ratio of PowerLaw and LogParabola spectral curvature.", "response": "def curvature(self, name, **kwargs):\n \"\"\"Test whether a source shows spectral curvature by comparing\n the likelihood ratio of PowerLaw and LogParabola spectral\n models.\n\n Parameters\n ----------\n name : str\n Source name.\n\n \"\"\"\n\n name = self.roi.get_source_by_name(name).name\n\n saved_state = LikelihoodState(self.like)\n source = self.components[0].like.logLike.getSource(str(name))\n\n old_spectrum = source.spectrum()\n old_pars = copy.deepcopy(self.roi[name].spectral_pars)\n old_type = self.roi[name]['SpectrumType']\n\n if old_type != 'PowerLaw':\n\n dnde = self.like[name].spectrum()(pyLike.dArg(1000.))\n value, scale = utils.scale_parameter(dnde)\n pars0 = {\n 'Prefactor':\n {'value': value, 'scale': scale,\n 'min': 1E-5, 'max': 1000., 'free': True},\n 'Index':\n {'value': 2.0, 'scale': -1.0, 'min': 0.0,\n 'max': 5.0, 'free': False},\n 'Scale':\n {'value': 1E3, 'scale': 1.0, 'min': 1.,\n 'max': 1E6, 'free': False},\n }\n self.set_source_spectrum(str(name), 'PowerLaw',\n spectrum_pars=pars0,\n update_source=False)\n\n self.free_source(name, loglevel=logging.DEBUG)\n fit_pl = self._fit(loglevel=logging.DEBUG)\n\n prefactor = self._get_param(name, 'Prefactor')\n index = self._get_param(name, 'Index')\n scale = self._get_param(name, 'Scale')\n\n pars1 = {\n 'norm': copy.deepcopy(prefactor),\n 'alpha': copy.deepcopy(index),\n 'Eb': copy.deepcopy(scale),\n }\n pars1['alpha']['scale'] *= -1\n 
pars1['alpha']['min'] = -5.0\n pars1['alpha']['max'] = 5.0\n\n self.set_source_spectrum(str(name), 'LogParabola',\n spectrum_pars=pars1,\n update_source=False)\n\n self.free_source(name, loglevel=logging.DEBUG)\n fit_lp = self._fit(loglevel=logging.DEBUG)\n\n self.set_source_spectrum(str(name), old_type,\n spectrum_pars=old_pars,\n update_source=False)\n\n pars2 = {\n 'Prefactor': copy.deepcopy(prefactor),\n 'Index1': copy.deepcopy(index),\n 'Cutoff': {'value': 1000.0, 'scale': 1E3,\n 'min': 10.0, 'max': 1E4, 'free': True},\n 'Index2': {'value': 1.0, 'scale': 1.0,\n 'min': 1.0, 'max': 1.0, 'free': False},\n 'Scale': copy.deepcopy(scale)\n }\n\n self.set_source_spectrum(str(name), 'PLSuperExpCutoff',\n spectrum_pars=pars2,\n update_source=False)\n\n self.free_source(name, loglevel=logging.DEBUG)\n fit_ple = self._fit(loglevel=logging.DEBUG)\n\n # Revert to initial spectral model\n self.set_source_spectrum(str(name), old_type,\n spectrum_pars=old_pars,\n update_source=False)\n saved_state.restore()\n\n lp_ts_curv = 2.0 * (fit_lp['loglike'] - fit_pl['loglike'])\n ple_ts_curv = 2.0 * (fit_ple['loglike'] - fit_pl['loglike'])\n o = MutableNamedTuple(ts_curv=lp_ts_curv,\n lp_ts_curv=lp_ts_curv,\n ple_ts_curv=ple_ts_curv,\n loglike_pl=fit_pl['loglike'],\n loglike_lp=fit_lp['loglike'],\n loglike_ple=fit_ple['loglike'])\n\n self.logger.info('LogLike_PL: %12.3f LogLike_LP: %12.3f LogLike_PLE: %12.3f',\n o.loglike_pl, o.loglike_lp, o.loglike_ple)\n self.logger.info('TS_curv: %.3f (LP)', o.lp_ts_curv)\n self.logger.info('TS_curv: %.3f (PLE)', o.ple_ts_curv)\n return o"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef bowtie(self, name, fd=None, loge=None):\n\n if loge is None:\n logemin = self.log_energies[0]\n logemax = self.log_energies[-1]\n loge = np.linspace(logemin, logemax, 50)\n\n o = {'energies': 10**loge,\n 'log_energies': loge,\n 'dnde': np.zeros(len(loge)) * np.nan,\n 'dnde_lo': 
np.zeros(len(loge)) * np.nan,\n 'dnde_hi': np.zeros(len(loge)) * np.nan,\n 'dnde_err': np.zeros(len(loge)) * np.nan,\n 'dnde_ferr': np.zeros(len(loge)) * np.nan,\n 'pivot_energy': np.nan}\n\n try:\n if fd is None:\n fd = FluxDensity.FluxDensity(self.like, name)\n except RuntimeError:\n self.logger.error('Failed to create FluxDensity',\n exc_info=True)\n return o\n\n dnde = [fd.value(10 ** x) for x in loge]\n dnde_err = [fd.error(10 ** x) for x in loge]\n\n dnde = np.array(dnde)\n dnde_err = np.array(dnde_err)\n m = dnde > 0\n\n fhi = np.zeros_like(dnde)\n flo = np.zeros_like(dnde)\n ferr = np.zeros_like(dnde)\n\n fhi[m] = dnde[m] * (1.0 + dnde_err[m] / dnde[m])\n flo[m] = dnde[m] / (1.0 + dnde_err[m] / dnde[m])\n ferr[m] = 0.5 * (fhi[m] - flo[m]) / dnde[m]\n fhi[~m] = dnde_err[~m]\n\n o['dnde'] = dnde\n o['dnde_lo'] = flo\n o['dnde_hi'] = fhi\n o['dnde_err'] = dnde_err\n o['dnde_ferr'] = ferr\n\n try:\n o['pivot_energy'] = 10 ** utils.interpolate_function_min(loge, o[\n 'dnde_ferr'])\n except Exception:\n self.logger.error('Failed to compute pivot energy',\n exc_info=True)\n\n return o", "response": "This method generates a spectral uncertainty band for the given source."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nupdates the dictionary for this source.", "response": "def update_source(self, name, paramsonly=False, reoptimize=False, **kwargs):\n \"\"\"Update the dictionary for this source.\n\n Parameters\n ----------\n\n name : str\n\n paramsonly : bool\n\n reoptimize : bool\n Re-fit background parameters in likelihood scan.\n\n \"\"\"\n\n npts = self.config['gtlike']['llscan_npts']\n optimizer = kwargs.get('optimizer', self.config['optimizer'])\n\n sd = self.get_src_model(name, paramsonly, reoptimize, npts,\n optimizer=optimizer)\n src = self.roi.get_source_by_name(name)\n src.update_data(sd)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a dictionary for a source with the current 
best - fit parameters.", "response": "def get_src_model(self, name, paramsonly=False, reoptimize=False,\n npts=None, **kwargs):\n \"\"\"Compose a dictionary for a source with the current best-fit\n parameters.\n\n Parameters\n ----------\n\n name : str\n\n paramsonly : bool\n Skip computing TS and likelihood profile.\n\n reoptimize : bool\n Re-fit background parameters in likelihood scan.\n\n npts : int\n Number of points for likelihood scan.\n\n Returns\n -------\n src_dict : dict\n\n \"\"\"\n\n self.logger.debug('Generating source dict for ' + name)\n\n optimizer = kwargs.get('optimizer', self.config['optimizer'])\n if npts is None:\n npts = self.config['gtlike']['llscan_npts']\n\n name = self.get_source_name(name)\n source = self.like[name].src\n spectrum = source.spectrum()\n normPar = self.like.normPar(name)\n\n src_dict = defaults.make_default_dict(defaults.source_flux_output)\n src_dict.update({'name': name,\n 'pivot_energy': 1000.,\n 'ts': np.nan,\n 'loglike': np.nan,\n 'npred': 0.0,\n 'npred_wt': 0.0,\n 'loglike_scan': np.nan * np.ones(npts),\n 'dloglike_scan': np.nan * np.ones(npts),\n 'eflux_scan': np.nan * np.ones(npts),\n 'flux_scan': np.nan * np.ones(npts),\n 'norm_scan': np.nan * np.ones(npts),\n })\n\n src_dict.update(gtutils.gtlike_spectrum_to_vectors(spectrum))\n src_dict['spectral_pars'] = gtutils.get_function_pars_dict(spectrum)\n\n # Get Counts Spectrum\n src_dict['model_counts'] = self.model_counts_spectrum(\n name, summed=True)\n src_dict['model_counts_wt'] = self.model_counts_spectrum(\n name, summed=True, weighted=True)\n\n # Get NPred\n src_dict['npred'] = self.like.NpredValue(str(name))\n # EAC, we need this b/c older version of the ST don't have the right signature\n try:\n src_dict['npred_wt'] = self.like.NpredValue(str(name), True)\n except (TypeError, NotImplementedError):\n src_dict['npred_wt'] = src_dict['npred']\n\n # Get the Model Fluxes\n try:\n thesrc = self.like[name]\n src_dict['flux'] = self.like.flux(name, 
self.energies[0],\n self.energies[-1])\n src_dict['flux100'] = self.like.flux(name, 100., 10 ** 5.5)\n src_dict['flux1000'] = self.like.flux(name, 1000., 10 ** 5.5)\n src_dict['flux10000'] = self.like.flux(name, 10000., 10 ** 5.5)\n src_dict['eflux'] = self.like.energyFlux(name,\n self.energies[0],\n self.energies[-1])\n src_dict['eflux100'] = self.like.energyFlux(name, 100.,\n 10 ** 5.5)\n src_dict['eflux1000'] = self.like.energyFlux(name, 1000.,\n 10 ** 5.5)\n src_dict['eflux10000'] = self.like.energyFlux(name, 10000.,\n 10 ** 5.5)\n src_dict['dnde'] = self.like[name].spectrum()(\n pyLike.dArg(src_dict['pivot_energy']))\n src_dict['dnde100'] = self.like[name].spectrum()(\n pyLike.dArg(100.))\n src_dict['dnde1000'] = self.like[name].spectrum()(\n pyLike.dArg(1000.))\n src_dict['dnde10000'] = self.like[name].spectrum()(\n pyLike.dArg(10000.))\n if normPar.getValue() == 0:\n normPar.setValue(1.0)\n\n dnde_index = -get_spectral_index(self.like[name],\n src_dict['pivot_energy'])\n\n dnde100_index = -get_spectral_index(self.like[name],\n 100.)\n dnde1000_index = -get_spectral_index(self.like[name],\n 1000.)\n dnde10000_index = -get_spectral_index(self.like[name],\n 10000.)\n\n normPar.setValue(0.0)\n else:\n dnde_index = -get_spectral_index(self.like[name],\n src_dict['pivot_energy'])\n\n dnde100_index = -get_spectral_index(self.like[name],\n 100.)\n dnde1000_index = -get_spectral_index(self.like[name],\n 1000.)\n dnde10000_index = -get_spectral_index(self.like[name],\n 10000.)\n src_dict['dnde_index'] = dnde_index\n src_dict['dnde100_index'] = dnde100_index\n src_dict['dnde1000_index'] = dnde1000_index\n src_dict['dnde10000_index'] = dnde10000_index\n\n except Exception:\n self.logger.error('Failed to update source parameters.',\n exc_info=True)\n\n # Only compute TS, errors, and ULs if the source was free in\n # the fit\n if not self.get_free_source_params(name) or paramsonly:\n return src_dict\n\n emax = 10 ** 5.5\n\n try:\n src_dict['flux_err'] = 
self.like.fluxError(name,\n self.energies[0],\n self.energies[-1])\n src_dict['flux100_err'] = self.like.fluxError(name, 100., emax)\n src_dict['flux1000_err'] = self.like.fluxError(name, 1000., emax)\n src_dict['flux10000_err'] = self.like.fluxError(name, 10000., emax)\n src_dict['eflux_err'] = \\\n self.like.energyFluxError(name, self.energies[0],\n self.energies[-1])\n src_dict['eflux100_err'] = self.like.energyFluxError(name, 100.,\n emax)\n src_dict['eflux1000_err'] = self.like.energyFluxError(name, 1000.,\n emax)\n src_dict['eflux10000_err'] = self.like.energyFluxError(name, 10000.,\n emax)\n\n except Exception:\n pass\n # self.logger.error('Failed to update source parameters.',\n # exc_info=True)\n lnlp = self.profile_norm(name, savestate=True,\n reoptimize=reoptimize, npts=npts,\n optimizer=optimizer)\n\n src_dict['loglike_scan'] = lnlp['loglike']\n src_dict['dloglike_scan'] = lnlp['dloglike']\n src_dict['eflux_scan'] = lnlp['eflux']\n src_dict['flux_scan'] = lnlp['flux']\n src_dict['norm_scan'] = lnlp['xvals']\n src_dict['loglike'] = np.max(lnlp['loglike'])\n\n flux_ul_data = utils.get_parameter_limits(\n lnlp['flux'], lnlp['dloglike'])\n eflux_ul_data = utils.get_parameter_limits(\n lnlp['eflux'], lnlp['dloglike'])\n\n if normPar.getValue() == 0:\n normPar.setValue(1.0)\n flux = self.like.flux(name, self.energies[0], self.energies[-1])\n flux100 = self.like.flux(name, 100., emax)\n flux1000 = self.like.flux(name, 1000., emax)\n flux10000 = self.like.flux(name, 10000., emax)\n eflux = self.like.energyFlux(name, self.energies[0],\n self.energies[-1])\n eflux100 = self.like.energyFlux(name, 100., emax)\n eflux1000 = self.like.energyFlux(name, 1000., emax)\n eflux10000 = self.like.energyFlux(name, 10000., emax)\n\n flux100_ratio = flux100 / flux\n flux1000_ratio = flux1000 / flux\n flux10000_ratio = flux10000 / flux\n eflux100_ratio = eflux100 / eflux\n eflux1000_ratio = eflux1000 / eflux\n eflux10000_ratio = eflux10000 / eflux\n normPar.setValue(0.0)\n 
else:\n flux100_ratio = src_dict['flux100'] / src_dict['flux']\n flux1000_ratio = src_dict['flux1000'] / src_dict['flux']\n flux10000_ratio = src_dict['flux10000'] / src_dict['flux']\n\n eflux100_ratio = src_dict['eflux100'] / src_dict['eflux']\n eflux1000_ratio = src_dict['eflux1000'] / src_dict['eflux']\n eflux10000_ratio = src_dict['eflux10000'] / src_dict['eflux']\n\n src_dict['flux_ul95'] = flux_ul_data['ul']\n src_dict['flux100_ul95'] = flux_ul_data['ul'] * flux100_ratio\n src_dict['flux1000_ul95'] = flux_ul_data['ul'] * flux1000_ratio\n src_dict['flux10000_ul95'] = flux_ul_data['ul'] * flux10000_ratio\n\n src_dict['eflux_ul95'] = eflux_ul_data['ul']\n src_dict['eflux100_ul95'] = eflux_ul_data['ul'] * eflux100_ratio\n src_dict['eflux1000_ul95'] = eflux_ul_data['ul'] * eflux1000_ratio\n src_dict['eflux10000_ul95'] = eflux_ul_data['ul'] * eflux10000_ratio\n\n # Extract covariance matrix\n fd = None\n try:\n fd = FluxDensity.FluxDensity(self.like, name)\n src_dict['covar'] = fd.covar\n except RuntimeError:\n pass\n # if ex.message == 'Covariance matrix has not been\n # computed.':\n\n # Extract bowtie\n if fd and len(src_dict['covar']) and src_dict['covar'].ndim >= 1:\n loge = np.linspace(self.log_energies[0],\n self.log_energies[-1], 50)\n src_dict['model_flux'] = self.bowtie(name, fd=fd, loge=loge)\n src_dict['dnde100_err'] = fd.error(100.)\n src_dict['dnde1000_err'] = fd.error(1000.)\n src_dict['dnde10000_err'] = fd.error(10000.)\n\n src_dict['pivot_energy'] = src_dict['model_flux']['pivot_energy']\n\n e0 = src_dict['pivot_energy']\n src_dict['dnde'] = self.like[name].spectrum()(pyLike.dArg(e0))\n src_dict['dnde_err'] = fd.error(e0)\n\n if not reoptimize:\n src_dict['ts'] = self.like.Ts2(name, reoptimize=reoptimize)\n else:\n src_dict['ts'] = -2.0 * lnlp['dloglike'][0]\n\n return src_dict"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef compute_srcprob(self,xmlfile=None, overwrite=False):\n\n for 
i,c in enumerate(self.components):\n # compute diffuse response, necessary for srcprob\n c._diffrsp_app(xmlfile=xmlfile)\n # compute srcprob\n c._srcprob_app(xmlfile = xmlfile, overwrite = overwrite)", "response": "Run the gtsrcprob app with the current model or a user provided xmlfile"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nadd a new source to the model.", "response": "def add_source(self, name, src_dict, free=None, save_source_maps=True,\n use_pylike=True, use_single_psf=False):\n \"\"\"Add a new source to the model. Source properties\n (spectrum, spatial model) are set with the src_dict argument.\n\n\n Parameters\n ----------\n\n name : str\n Source name.\n\n src_dict : dict or `~fermipy.roi_model.Source` object\n Dictionary or Source object defining the properties of the\n source.\n\n free : bool\n Initialize the source with the normalization parameter free.\n\n save_source_maps : bool\n Write the source map for this source to the source maps file.\n\n use_pylike : bool\n\n use_single_psf : bool\n\n \"\"\"\n\n # if self.roi.has_source(name):\n # msg = 'Source %s already exists.' 
% name\n # self.logger.error(msg)\n # raise Exception(msg)\n\n srcmap_utils.delete_source_map(self.files['srcmap'], name)\n\n src = self.roi[name]\n if self.config['gtlike']['expscale'] is not None and \\\n name not in self._src_expscale:\n self._src_expscale[name] = self.config['gtlike']['expscale']\n\n if self._like is None:\n return\n\n if not use_pylike:\n self._update_srcmap_file([src], True)\n\n pylike_src = self._create_source(src)\n\n # Initialize source as free/fixed\n if free is not None:\n pylike_src.spectrum().normPar().setFree(free)\n\n if hasattr(pyLike, 'PsfIntegConfig') and \\\n hasattr(pyLike.PsfIntegConfig, 'set_use_single_psf'):\n config = pyLike.BinnedLikeConfig(self.like.logLike.config())\n config.psf_integ_config().set_use_single_psf(use_single_psf)\n self.like.addSource(pylike_src, config)\n else:\n self.like.addSource(pylike_src)\n\n self.like.syncSrcParams(str(name))\n self.like.logLike.buildFixedModelWts()\n if save_source_maps and \\\n not self.config['gtlike']['use_external_srcmap']:\n self.like.logLike.saveSourceMaps(str(self.files['srcmap']))\n\n self.set_exposure_scale(name)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncreates a pyLikelihood Source object from a single source object.", "response": "def _create_source(self, src):\n \"\"\"Create a pyLikelihood Source object from a\n `~fermipy.roi_model.Model` object.\"\"\"\n\n if src['SpatialType'] == 'SkyDirFunction':\n pylike_src = pyLike.PointSource(self.like.logLike.observation())\n pylike_src.setDir(src.skydir.ra.deg, src.skydir.dec.deg, False,\n False)\n elif src['SpatialType'] == 'SpatialMap':\n filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))\n sm = pyLike.SpatialMap(filepath)\n pylike_src = pyLike.DiffuseSource(sm,\n self.like.logLike.observation(),\n False)\n elif src['SpatialType'] == 'RadialProfile':\n filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))\n sm = pyLike.RadialProfile(filepath)\n 
sm.setCenter(src['ra'], src['dec'])\n pylike_src = pyLike.DiffuseSource(sm,\n self.like.logLike.observation(),\n False)\n elif src['SpatialType'] == 'RadialGaussian':\n sm = pyLike.RadialGaussian(src.skydir.ra.deg, src.skydir.dec.deg,\n src.spatial_pars['Sigma']['value'])\n pylike_src = pyLike.DiffuseSource(sm,\n self.like.logLike.observation(),\n False)\n\n elif src['SpatialType'] == 'RadialDisk':\n sm = pyLike.RadialDisk(src.skydir.ra.deg, src.skydir.dec.deg,\n src.spatial_pars['Radius']['value'])\n pylike_src = pyLike.DiffuseSource(sm,\n self.like.logLike.observation(),\n False)\n\n elif src['SpatialType'] == 'MapCubeFunction':\n filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))\n mcf = pyLike.MapCubeFunction2(filepath)\n pylike_src = pyLike.DiffuseSource(mcf,\n self.like.logLike.observation(),\n False)\n else:\n raise Exception('Unrecognized spatial type: %s',\n src['SpatialType'])\n\n if src['SpectrumType'] == 'FileFunction':\n fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],\n src.spectral_pars)\n\n file_function = pyLike.FileFunction_cast(fn)\n filename = str(os.path.expandvars(src['Spectrum_Filename']))\n file_function.readFunction(filename)\n elif src['SpectrumType'] == 'DMFitFunction':\n\n fn = pyLike.DMFitFunction()\n fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],\n src.spectral_pars, fn)\n filename = str(os.path.expandvars(src['Spectrum_Filename']))\n fn.readFunction(filename)\n else:\n fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],\n src.spectral_pars)\n\n pylike_src.setSpectrum(fn)\n pylike_src.setName(str(src.name))\n\n return pylike_src"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef set_exposure_scale(self, name, scale=None):\n name = self.roi.get_source_by_name(name).name\n if scale is None and name not in self._src_expscale:\n return\n elif scale is None:\n scale = self._src_expscale.get(name, 1.0)\n else:\n self._src_expscale[name] = scale\n 
self._scale_srcmap({name: scale})", "response": "Set the exposure correction of a source."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef set_edisp_flag(self, name, flag=True):\n src = self.roi.get_source_by_name(name)\n name = src.name\n self.like[name].src.set_edisp_flag(flag)", "response": "Enable or disable the energy dispersion correction for a specific source."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef set_energy_range(self, logemin, logemax):\n\n if logemin is None:\n logemin = self.log_energies[0]\n\n if logemax is None:\n logemax = self.log_energies[-1]\n\n imin = int(utils.val_to_edge(self.log_energies, logemin)[0])\n imax = int(utils.val_to_edge(self.log_energies, logemax)[0])\n\n if imin - imax == 0:\n imin = int(len(self.log_energies) - 1)\n imax = int(len(self.log_energies) - 1)\n\n klims = self.like.logLike.klims()\n if imin != klims[0] or imax != klims[1]:\n self.like.selectEbounds(imin, imax)\n\n return np.array([self.log_energies[imin], self.log_energies[imax]])", "response": "Set the energy range of the analysis."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef counts_map(self):\n try:\n if isinstance(self.like, gtutils.SummedLikelihood):\n cmap = self.like.components[0].logLike.countsMap()\n p_method = cmap.projection().method()\n else:\n cmap = self.like.logLike.countsMap()\n p_method = cmap.projection().method()\n except Exception:\n p_method = 0\n\n if p_method == 0: # WCS\n z = cmap.data()\n z = np.array(z).reshape(self.enumbins, self.npix, self.npix)\n return WcsNDMap(copy.deepcopy(self.geom), z)\n elif p_method == 1: # HPX\n z = cmap.data()\n z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix))\n return HpxNDMap(copy.deepcopy(self.geom), z)\n else:\n self.logger.error('Did not recognize CountsMap type %i' % p_method,\n 
exc_info=True)\n return None", "response": "Return 3 - D counts map for this component as a Map object."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn 3 - D weights map for this component as a Map object.", "response": "def weight_map(self):\n \"\"\"Return 3-D weights map for this component as a Map object.\n\n Returns\n -------\n map : `~fermipy.skymap.MapBase`\n\n \"\"\"\n # EAC we need the try blocks b/c older versions of the ST don't have some of these functions\n if isinstance(self.like, gtutils.SummedLikelihood):\n cmap = self.like.components[0].logLike.countsMap()\n try:\n p_method = cmap.projection().method()\n except AttributeError:\n p_method = 0\n try:\n if self.like.components[0].logLike.has_weights():\n wmap = self.like.components[0].logLike.weightMap()\n else:\n wmap = None\n except Exception:\n wmap = None\n else:\n cmap = self.like.logLike.countsMap()\n try:\n p_method = cmap.projection().method()\n except AttributeError:\n p_method = 0\n try:\n if self.like.logLike.has_weights():\n wmap = self.like.logLike.weightMap()\n else:\n wmap = None\n except Exception:\n wmap = None\n\n if p_method == 0: # WCS\n if wmap is None:\n z = np.ones((self.enumbins, self.npix, self.npix))\n else:\n z = wmap.model()\n z = np.array(z).reshape(self.enumbins, self.npix, self.npix)\n return WcsNDMap(copy.deepcopy(self._geom), z)\n elif p_method == 1: # HPX\n nhpix = np.max(self.geom.npix)\n if wmap is None:\n z = np.ones((self.enumbins, nhpix))\n else:\n z = wmap.model()\n z = np.array(z).reshape(self.enumbins, nhpix)\n return HpxNDMap(self.geom, z)\n else:\n self.logger.error('Did not recognize CountsMap type %i' % p_method,\n exc_info=True)\n return None"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef model_counts_map(self, name=None, exclude=None, use_mask=False):\n if self.projtype == \"WCS\":\n v = pyLike.FloatVector(self.npix ** 2 * self.enumbins)\n elif 
self.projtype == \"HPX\":\n v = pyLike.FloatVector(np.max(self.geom.npix) * self.enumbins)\n else:\n raise Exception(\"Unknown projection type %s\", self.projtype)\n\n exclude = utils.arg_to_list(exclude)\n names = utils.arg_to_list(name)\n\n excluded_names = []\n for i, t in enumerate(exclude):\n srcs = self.roi.get_sources_by_name(t)\n excluded_names += [s.name for s in srcs]\n\n if not hasattr(self.like.logLike, 'loadSourceMaps'):\n # Update fixed model\n self.like.logLike.buildFixedModelWts()\n # Populate source map hash\n self.like.logLike.buildFixedModelWts(True)\n elif (name is None or name == 'all') and not exclude:\n self.like.logLike.loadSourceMaps()\n\n src_names = []\n if (name is None) or (name == 'all'):\n src_names = [src.name for src in self.roi.sources]\n elif name == 'diffuse':\n src_names = [src.name for src in self.roi.sources if\n src.diffuse]\n else:\n srcs = [self.roi.get_source_by_name(t) for t in names]\n src_names = [src.name for src in srcs]\n\n # Remove sources in exclude list\n src_names = [str(t) for t in src_names if t not in excluded_names]\n\n # EAC we need the try blocks b/c older versions of the ST don't have some of these functions\n if len(src_names) == len(self.roi.sources):\n try:\n self.like.logLike.computeModelMap(v, use_mask)\n except (TypeError, NotImplementedError):\n self.like.logLike.computeModelMap(v)\n elif not hasattr(self.like.logLike, 'setSourceMapImage'):\n for s in src_names:\n model = self.like.logLike.sourceMap(str(s))\n try:\n self.like.logLike.updateModelMap(v, model, use_mask)\n except (TypeError, NotImplementedError):\n self.like.logLike.updateModelMap(v, model)\n else:\n try:\n if hasattr(self.like.logLike, 'has_weights'):\n self.like.logLike.computeModelMap(src_names, v, use_mask)\n else:\n self.like.logLike.computeModelMap(src_names, v)\n except:\n vsum = np.zeros(v.size())\n for s in src_names:\n vtmp = pyLike.FloatVector(v.size())\n if hasattr(self.like.logLike, 'has_weights'):\n 
self.like.logLike.computeModelMap(\n str(s), vtmp, use_mask)\n else:\n self.like.logLike.computeModelMap(str(s), vtmp)\n vsum += vtmp\n v = pyLike.FloatVector(vsum)\n\n if self.projtype == \"WCS\":\n z = np.array(v).reshape(self.enumbins, self.npix, self.npix)\n return WcsNDMap(copy.deepcopy(self._geom), z)\n elif self.projtype == \"HPX\":\n z = np.array(v).reshape(self.enumbins, np.max(self._geom.npix))\n return HpxNDMap(copy.deepcopy(self._geom), z)\n else:\n raise Exception(\n \"Did not recognize projection type %s\", self.projtype)", "response": "Returns the model expectation map for a single source a set of sources or all sources in the ROI."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef model_counts_spectrum(self, name, logemin, logemax, weighted=False):\n # EAC, we need this b/c older version of the ST don't have the right signature\n try:\n cs = np.array(self.like.logLike.modelCountsSpectrum(\n str(name), weighted))\n except (TypeError, NotImplementedError):\n cs = np.array(self.like.logLike.modelCountsSpectrum(str(name)))\n imin = utils.val_to_edge(self.log_energies, logemin)[0]\n imax = utils.val_to_edge(self.log_energies, logemax)[0]\n if imax <= imin:\n raise Exception('Invalid energy range.')\n return cs[imin:imax]", "response": "Return the model counts spectrum of a source."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nrunning pre-processing step for this component. This will generate all of the auxiliary files needed to instantiate a likelihood object. By default this function will skip any steps for which the output file already exists. Parameters ---------- overwrite : bool Run all pre-processing steps even if the output file of that step is present in the working directory.", "response": "def setup(self, overwrite=False, **kwargs):\n \"\"\"Run pre-processing step for this component. 
This will\n generate all of the auxiliary files needed to instantiate a\n likelihood object. By default this function will skip any\n steps for which the output file already exists.\n\n Parameters\n ----------\n overwrite : bool\n\n Run all pre-processing steps even if the output file of\n that step is present in the working directory.\n \"\"\"\n\n loglevel = kwargs.get('loglevel', self.loglevel)\n\n self.logger.log(loglevel, 'Running setup for component %s',\n self.name)\n\n use_external_srcmap = self.config['gtlike']['use_external_srcmap']\n\n # Run data selection\n if not use_external_srcmap:\n self._select_data(overwrite=overwrite, **kwargs)\n\n # Create LT Cube\n if self._ext_ltcube is not None:\n self.logger.log(loglevel, 'Using external LT cube.')\n else:\n self._create_ltcube(overwrite=overwrite, **kwargs)\n\n self.logger.debug('Loading LT Cube %s', self.files['ltcube'])\n self._ltc = LTCube.create(self.files['ltcube'])\n\n # Extract tmin, tmax from LT cube\n self._tmin = self._ltc.tstart\n self._tmax = self._ltc.tstop\n\n self.logger.debug('Creating PSF model')\n self._psf = irfs.PSFModel.create(self.roi.skydir, self._ltc,\n self.config['gtlike']['irfs'],\n self.config['selection']['evtype'],\n self.energies)\n\n # Bin data and create exposure cube\n if not use_external_srcmap:\n self._bin_data(overwrite=overwrite, **kwargs)\n self._create_expcube(overwrite=overwrite, **kwargs)\n\n # This is needed in case the exposure map is in HEALPix\n hpxhduname = \"HPXEXPOSURES\"\n try:\n self._bexp = Map.read(self.files['bexpmap'], hdu=hpxhduname)\n except KeyError:\n self._bexp = Map.read(self.files['bexpmap'])\n\n # Write ROI XML\n self.roi.write_xml(self.files['srcmdl'], self.config['model'])\n\n # Create source maps file\n if not use_external_srcmap:\n self._create_srcmaps(overwrite=overwrite)\n\n if not self.config['data']['cacheft1'] and os.path.isfile(self.files['ft1']):\n self.logger.debug('Deleting FT1 file.')\n os.remove(self.files['ft1'])\n\n 
self.logger.log(loglevel, 'Finished setup for component %s',\n self.name)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _scale_srcmap(self, scale_map, check_header=True, names=None):\n\n srcmap = fits.open(self.files['srcmap'])\n\n for hdu in srcmap[1:]:\n if hdu.name not in scale_map:\n continue\n if names is not None and hdu.name not in names:\n continue\n\n scale = scale_map[hdu.name]\n if scale < 1e-20:\n self.logger.warning(\n \"The expscale parameter was zero, setting it to 1e-8\")\n scale = 1e-8\n if 'EXPSCALE' in hdu.header and check_header:\n old_scale = hdu.header['EXPSCALE']\n else:\n old_scale = 1.0\n hdu.data *= scale / old_scale\n hdu.header['EXPSCALE'] = (scale,\n 'Exposure correction applied to this map')\n\n srcmap.writeto(self.files['srcmap'], overwrite=True)\n srcmap.close()\n\n # Force reloading the map from disk\n for name in scale_map.keys():\n self.like.logLike.eraseSourceMap(str(name))\n self.like.logLike.buildFixedModelWts()", "response": "Apply exposure corrections to the source map file."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmakes an exposure cube with the same binning as the counts map.", "response": "def _make_scaled_srcmap(self):\n \"\"\"Make an exposure cube with the same binning as the counts map.\"\"\"\n\n self.logger.info('Computing scaled source map.')\n\n bexp0 = fits.open(self.files['bexpmap_roi'])\n bexp1 = fits.open(self.config['gtlike']['bexpmap'])\n srcmap = fits.open(self.config['gtlike']['srcmap'])\n\n if bexp0[0].data.shape != bexp1[0].data.shape:\n raise Exception('Wrong shape for input exposure map file.')\n\n bexp_ratio = bexp0[0].data / bexp1[0].data\n\n self.logger.info(\n 'Min/Med/Max exposure correction: %f %f %f' % (np.min(bexp_ratio),\n np.median(\n bexp_ratio),\n np.max(bexp_ratio)))\n\n for hdu in srcmap[1:]:\n\n if hdu.name == 'GTI':\n continue\n if hdu.name == 'EBOUNDS':\n continue\n hdu.data *= 
bexp_ratio\n\n srcmap.writeto(self.files['srcmap'], overwrite=True)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nsimulating the whole ROI or inject a simulation of one or more model components into the data.", "response": "def simulate_roi(self, name=None, clear=True, randomize=True):\n \"\"\"Simulate the whole ROI or inject a simulation of one or\n more model components into the data.\n\n Parameters\n ----------\n name : str\n Name of the model component to be simulated. If None then\n the whole ROI will be simulated.\n\n clear : bool\n Zero the current counts map before injecting the simulation.\n\n randomize : bool\n Fill with each pixel with random values drawn from a\n poisson distribution. If false then fill each pixel with\n the counts expectation value.\n\n \"\"\"\n cm = self.counts_map()\n data = cm.data\n m = self.model_counts_map(name) \n\n if clear:\n data.fill(0.0)\n\n if randomize:\n if m.data.min()<0.:\n self.logger.warning('At least on negative value found in model map.'\n ' Changing it/them to 0')\n indexcond = np.where( m.data <0. 
)\n m.data[indexcond]=np.zeros(len(m.data[indexcond]))\n \n data += np.random.poisson(m.data).astype(float)\n else:\n data += m.data\n\n if hasattr(self.like.logLike, 'setCountsMap'):\n self.like.logLike.setCountsMap(np.ravel(data))\n\n srcmap_utils.update_source_maps(self.files['srcmap'],\n {'PRIMARY': data},\n logger=self.logger)\n cm.write(self.files['ccubemc'], overwrite=True, conv='fgst-ccube')"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef write_model_map(self, model_name=None, name=None):\n\n if model_name is None:\n suffix = self.config['file_suffix']\n else:\n suffix = '_%s%s' % (model_name, self.config['file_suffix'])\n\n self.logger.info('Generating model map for component %s.', self.name)\n\n outfile = os.path.join(self.config['fileio']['workdir'],\n 'mcube%s.fits' % (suffix))\n\n cmap = self.model_counts_map(name, use_mask=False)\n cmap.write(outfile, overwrite=True, conv='fgst-ccube')\n return cmap", "response": "Save counts model map to a FITS file."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nsaving counts model map to a FITS file.", "response": "def write_weight_map(self, model_name=None):\n \"\"\"Save counts model map to a FITS file.\n\n \"\"\"\n\n if model_name is None:\n suffix = self.config['file_suffix']\n else:\n suffix = '_%s%s' % (model_name, self.config['file_suffix'])\n\n self.logger.info('Generating model map for component %s.', self.name)\n\n outfile = os.path.join(self.config['fileio']['workdir'],\n 'wcube%s.fits' % (suffix))\n\n wmap = self.weight_map()\n wmap.write(outfile, overwrite=True, conv='fgst-ccube')\n return wmap"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nupdate the contents of the source map file and generate source maps for any components that are not present.", "response": "def _update_srcmap_file(self, sources, overwrite=True):\n \"\"\"Check the contents of the source map file 
and generate\n source maps for any components that are not present.\"\"\"\n\n if not os.path.isfile(self.files['srcmap']):\n return\n\n hdulist = fits.open(self.files['srcmap'])\n hdunames = [hdu.name.upper() for hdu in hdulist]\n\n srcmaps = {}\n\n for src in sources:\n\n if src.name.upper() in hdunames and not overwrite:\n continue\n self.logger.debug('Creating source map for %s', src.name)\n srcmaps[src.name] = self._create_srcmap(src.name, src)\n\n if srcmaps:\n self.logger.debug(\n 'Updating source map file for component %s.', self.name)\n srcmap_utils.update_source_maps(self.files['srcmap'], srcmaps,\n logger=self.logger)\n hdulist.close()"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ngenerate the source map for a source.", "response": "def _create_srcmap(self, name, src, **kwargs):\n \"\"\"Generate the source map for a source.\"\"\"\n\n psf_scale_fn = kwargs.get('psf_scale_fn', None)\n skydir = src.skydir\n spatial_model = src['SpatialModel']\n spatial_width = src['SpatialWidth']\n xpix, ypix = self.geom.to_image().coord_to_pix(skydir)\n exp = self._bexp.interp_by_coord(\n (skydir, self._bexp.geom.axes[0].center))\n cache = self._srcmap_cache.get(name, None)\n if cache is not None:\n k = cache.create_map([ypix, xpix])\n else:\n k = srcmap_utils.make_srcmap(self._psf, exp, spatial_model,\n spatial_width,\n npix=self.npix, xpix=xpix, ypix=ypix,\n cdelt=self.config['binning']['binsz'],\n psf_scale_fn=psf_scale_fn,\n sparse=True)\n\n return k"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nupdate the source map for an existing source in memory.", "response": "def _update_srcmap(self, name, src, **kwargs):\n \"\"\"Update the source map for an existing source in memory.\"\"\"\n\n k = self._create_srcmap(name, src, **kwargs)\n scale = self._src_expscale.get(name, 1.0)\n k *= scale\n\n # Force the source map to be cached\n # FIXME: No longer necessary to force cacheing in ST after 11-05-02\n 
self.like.logLike.sourceMap(str(name)).model()\n self.like.logLike.setSourceMapImage(str(name), np.ravel(k))\n self.like.logLike.sourceMap(str(name)).model()\n\n normPar = self.like.normPar(name) \n if not normPar.isFree():\n self.like.logLike.buildFixedModelWts()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngenerating a counts model map from an XML model file using the GT model.", "response": "def generate_model(self, model_name=None, outfile=None):\n \"\"\"Generate a counts model map from an XML model file using\n gtmodel.\n\n Parameters\n ----------\n model_name : str\n Name of the model. If no name is given it will use the\n baseline model.\n\n outfile : str\n Override the name of the output model file.\n \"\"\"\n\n if model_name is not None:\n model_name = os.path.splitext(model_name)[0]\n\n if model_name is None or model_name == '':\n srcmdl = self.files['srcmdl']\n else:\n srcmdl = self.get_model_path(model_name)\n\n if not os.path.isfile(srcmdl):\n raise Exception(\"Model file does not exist: %s\", srcmdl)\n\n if model_name is None:\n suffix = self.config['file_suffix']\n else:\n suffix = '_%s%s' % (model_name, self.config['file_suffix'])\n\n outfile = os.path.join(self.config['fileio']['workdir'],\n 'mcube%s.fits' % (suffix))\n\n # May consider generating a custom source model file\n if not os.path.isfile(outfile):\n\n kw = dict(srcmaps=self.files['srcmap'],\n srcmdl=srcmdl,\n bexpmap=self.files['bexpmap'],\n outfile=outfile,\n expcube=self.files['ltcube'],\n irfs=self.config['gtlike']['irfs'],\n evtype=self.config['selection']['evtype'],\n edisp=bool(self.config['gtlike']['edisp']),\n outtype='ccube',\n chatter=self.config['logging']['chatter'])\n\n run_gtapp('gtmodel', self.logger, kw)\n else:\n self.logger.info('Skipping gtmodel')"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef write_xml(self, xmlfile):\n\n xmlfile = self.get_model_path(xmlfile)\n 
self.logger.info('Writing %s...', xmlfile)\n self.like.writeXml(str(xmlfile))", "response": "Write the XML model for this analysis component."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ninfer the path to the XML model name.", "response": "def get_model_path(self, name):\n \"\"\"Infer the path to the XML model name.\"\"\"\n\n name, ext = os.path.splitext(name)\n ext = '.xml'\n xmlfile = name + self.config['file_suffix'] + ext\n xmlfile = utils.resolve_path(xmlfile,\n workdir=self.config['fileio']['workdir'])\n\n return xmlfile"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _tscube_app(self, xmlfile):\n\n xmlfile = self.get_model_path(xmlfile)\n\n outfile = os.path.join(self.config['fileio']['workdir'],\n 'tscube%s.fits' % (self.config['file_suffix']))\n\n kw = dict(cmap=self.files['ccube'],\n expcube=self.files['ltcube'],\n bexpmap=self.files['bexpmap'],\n irfs=self.config['gtlike']['irfs'],\n evtype=self.config['selection']['evtype'],\n srcmdl=xmlfile,\n nxpix=self.npix, nypix=self.npix,\n binsz=self.config['binning']['binsz'],\n xref=float(self.roi.skydir.ra.deg),\n yref=float(self.roi.skydir.dec.deg),\n proj=self.config['binning']['proj'],\n stlevel=0,\n coordsys=self.config['binning']['coordsys'],\n outfile=outfile)\n\n run_gtapp('gttscube', self.logger, kw)", "response": "Run gttscube as an application."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncompute the diffuse response", "response": "def _diffrsp_app(self,xmlfile=None, **kwargs):\n \"\"\"\n Compute the diffuse response \n \"\"\"\n loglevel = kwargs.get('loglevel', self.loglevel)\n\n self.logger.log(loglevel, 'Computing diffuse repsonce for component %s.',\n self.name)\n\n # set the srcmdl\n srcmdl_file = self.files['srcmdl']\n if xmlfile is not None:\n srcmdl_file = self.get_model_path(xmlfile)\n\n kw = dict(evfile=self.files['ft1'],\n scfile=self.data_files['scfile'],\n 
irfs = self.config['gtlike']['irfs'],\n evtype = self.config['selection']['evtype'],\n srcmdl = srcmdl_file)\n\n run_gtapp('gtdiffrsp', self.logger, kw, loglevel=loglevel)\n return"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _srcprob_app(self,xmlfile=None, overwrite=False, **kwargs):\n\n loglevel = kwargs.get('loglevel', self.loglevel)\n\n self.logger.log(loglevel, 'Computing src probability for component %s.',\n self.name)\n\n # set the srcmdl\n srcmdl_file = self.files['srcmdl']\n if xmlfile is not None:\n srcmdl_file = self.get_model_path(xmlfile)\n\n # set the outfile\n # it's defined here and not in self.files dict\n # so that it is copied with the stage_output module\n # even if savefits is False\n outfile = os.path.join(self.workdir, \n 'ft1_srcprob{0[file_suffix]:s}.fits'.format(self.config))\n\n kw = dict(evfile=self.files['ft1'],\n scfile=self.data_files['scfile'],\n outfile= outfile,\n irfs = self.config['gtlike']['irfs'],\n srcmdl = srcmdl_file)\n self.logger.debug(kw)\n\n # run gtapp for the srcprob\n if os.path.isfile(outfile) and not overwrite:\n self.logger.info('Skipping gtsrcprob')\n else:\n run_gtapp('gtsrcprob', self.logger, kw, loglevel=loglevel)", "response": "Run srcprob for an analysis component as an application."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nremoving null items from a dictionary", "response": "def purge_dict(idict):\n \"\"\"Remove null items from a dictionary \"\"\"\n odict = {}\n for key, val in idict.items():\n if is_null(val):\n continue\n odict[key] = val\n return odict"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef main(cls):\n chain = cls.create()\n args = chain._run_argparser(sys.argv[1:])\n chain._run_chain(sys.stdout, args.dry_run)\n chain._finalize(args.dry_run)", "response": "Hook to run this Chain from the command line"} {"SOURCE": 
"codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _latch_file_info(self):\n self._map_arguments(self.args)\n self.files.latch_file_info(self.args)\n self.sub_files.file_dict.clear()\n self.sub_files.update(self.files.file_dict)\n for link in self._links.values():\n self.sub_files.update(link.files.file_dict)\n self.sub_files.update(link.sub_files.file_dict)", "response": "Internal function to update the dictionaries holding the input and output files and the output files."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _set_link(self, linkname, cls, **kwargs):\n val_copy = purge_dict(kwargs.copy())\n sub_link_prefix = val_copy.pop('link_prefix', '')\n link_prefix = self.link_prefix + sub_link_prefix\n create_args = dict(linkname=linkname,\n link_prefix=link_prefix,\n job_archive=val_copy.pop('job_archive', None),\n file_stage=val_copy.pop('file_stage', None))\n job_args = val_copy\n if linkname in self._links:\n link = self._links[linkname]\n link.update_args(job_args)\n else:\n link = cls.create(**create_args)\n self._links[link.linkname] = link\n logfile_default = os.path.join('logs', '%s.log' % link.full_linkname)\n logfile = kwargs.setdefault('logfile', logfile_default)\n link._register_job(JobDetails.topkey, job_args,\n logfile, status=JobStatus.unknown)\n return link", "response": "Transfer options kwargs to a Link object if needed."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\npasses self. 
_job_archive along to links", "response": "def _set_links_job_archive(self):\n \"\"\"Pass self._job_archive along to links\"\"\"\n for link in self._links.values():\n link._job_archive = self._job_archive"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nrun all the links in the chain and returns the base class of the base class.", "response": "def _run_chain(self,\n stream=sys.stdout,\n dry_run=False,\n stage_files=True,\n force_run=False,\n resubmit_failed=False):\n \"\"\"Run all the links in the chain\n\n Parameters\n -----------\n stream : `file`\n Stream to print to,\n Must have 'write' function\n\n dry_run : bool\n Print commands but do not run them\n\n stage_files : bool\n Stage files to and from the scratch area\n\n force_run : bool\n Run jobs, even if they are marked as done\n\n resubmit_failed : bool\n Resubmit failed jobs\n\n \"\"\"\n self._set_links_job_archive()\n failed = False\n\n if self._file_stage is not None:\n input_file_mapping, output_file_mapping = self._map_scratch_files(\n self.sub_files)\n if stage_files:\n self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)\n self._file_stage.make_scratch_dirs(\n output_file_mapping, dry_run)\n self._stage_input_files(input_file_mapping, dry_run)\n\n for link in self._links.values():\n logfile = os.path.join('logs', \"%s.log\" % link.full_linkname)\n link._archive_self(logfile, status=JobStatus.unknown)\n key = JobDetails.make_fullkey(link.full_linkname)\n if hasattr(link, 'check_status'):\n link.check_status(stream, no_wait=True,\n check_once=True, do_print=False)\n else:\n pass\n link_status = link.check_job_status(key)\n if link_status in [JobStatus.done]:\n if not force_run:\n print (\"Skipping done link\", link.full_linkname)\n continue\n elif link_status in [JobStatus.running]:\n if not force_run and not resubmit_failed:\n print (\"Skipping running link\", link.full_linkname)\n continue\n elif link_status in [JobStatus.failed,\n JobStatus.partial_failed]:\n if not 
resubmit_failed:\n print (\"Skipping failed link\", link.full_linkname)\n continue\n print (\"Running link \", link.full_linkname)\n link.run_with_log(dry_run=dry_run, stage_files=False,\n resubmit_failed=resubmit_failed)\n link_status = link.check_jobs_status()\n link._set_status_self(status=link_status)\n if link_status in [JobStatus.failed, JobStatus.partial_failed]:\n print (\"Stoping chain execution at failed link %s\" %\n link.full_linkname)\n failed = True\n break\n# elif link_status in [JobStatus.partial_failed]:\n# print (\"Resubmitting partially failed link %s\" %\n# link.full_linkname)\n# link.run_with_log(dry_run=dry_run, stage_files=False,\n# resubmit_failed=resubmit_failed)\n# link_status = link.check_jobs_status()\n# link._set_status_self(status=link_status)\n# if link_status in [JobStatus.partial_failed]:\n# print (\"Stoping chain execution: resubmission failed %s\" %\n# link.full_linkname)\n# failed = True\n# break\n\n if self._file_stage is not None and stage_files and not failed:\n self._stage_output_files(output_file_mapping, dry_run)\n\n chain_status = self.check_links_status()\n print (\"Chain status: %s\" % (JOB_STATUS_STRINGS[chain_status]))\n if chain_status == 5:\n job_status = 0\n else:\n job_status = -1\n self._write_status_to_log(job_status, stream)\n self._set_status_self(status=chain_status)\n\n if self._job_archive:\n self._job_archive.file_archive.update_file_status()\n self._job_archive.write_table_file()"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nclears all the jobs in this instance.", "response": "def clear_jobs(self, recursive=True):\n \"\"\"Clear a dictionary with all the jobs\n\n If recursive is True this will include jobs from all internal `Link`\n \"\"\"\n if recursive:\n for link in self._links.values():\n link.clear_jobs(recursive)\n self.jobs.clear()"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn a dictionary with all the 
jobs in this instance.", "response": "def get_jobs(self, recursive=True):\n \"\"\"Return a dictionary with all the jobs\n\n If recursive is True this will include jobs from all internal `Link`\n \"\"\"\n if recursive:\n ret_dict = self.jobs.copy()\n for link in self._links.values():\n ret_dict.update(link.get_jobs(recursive))\n return ret_dict\n return self.jobs"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef missing_input_files(self):\n ret_dict = OrderedDict()\n for link in self._links.values():\n link_dict = link.missing_input_files()\n for key, value in link_dict.items():\n try:\n ret_dict[key] += value\n except KeyError:\n ret_dict[key] = value\n return ret_dict", "response": "Make and return a dictionary of the missing input files. This returns a dictionary mapping the filepath to list of Link that use the file as input."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nchecking the status of all the jobs run from the ialog objects in this Chain and return a status flag that summarizes that.", "response": "def check_links_status(self,\n fail_running=False,\n fail_pending=False):\n \"\"\"\"Check the status of all the jobs run from the\n `Link` objects in this `Chain` and return a status\n flag that summarizes that.\n\n Parameters\n ----------\n\n fail_running : `bool`\n If True, consider running jobs as failed\n\n fail_pending : `bool`\n If True, consider pending jobs as failed\n\n Returns\n -------\n status : `JobStatus`\n Job status flag that summarizes the status of all the jobs,\n\n \"\"\"\n status_vector = JobStatusVector()\n for link in self._links.values():\n key = JobDetails.make_fullkey(link.full_linkname)\n link_status = link.check_job_status(key,\n fail_running=fail_running,\n fail_pending=fail_pending)\n status_vector[link_status] += 1\n\n return status_vector.get_status()"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code 
does\ndef run(self, stream=sys.stdout, dry_run=False,\n stage_files=True, resubmit_failed=False):\n \"\"\"Runs this `Chain`.\n\n Parameters\n -----------\n stream : `file`\n Stream that this `Link` will print to,\n Must have 'write' function\n\n dry_run : bool\n Print command but do not run it.\n\n stage_files : bool\n Copy files to and from scratch staging area.\n\n resubmit_failed : bool\n Flag for sub-classes to resubmit failed jobs.\n\n \"\"\"\n self._run_chain(stream, dry_run, stage_files,\n resubmit_failed=resubmit_failed)", "response": "Runs this `Chain`.\n\n Parameters\n -----------\n stream : `file`\n Stream that this `Link` will print to,\n Must have 'write' function\n\n dry_run : bool\n Print command but do not run it.\n\n stage_files : bool\n Copy files to and from scratch staging area.\n\n resubmit_failed : bool\n Flag for sub-classes to resubmit failed jobs."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate the argument used to invoke the application.", "response": "def update_args(self, override_args):\n \"\"\"Update the argument used to invoke the application\n\n Note that this will also update the dictionary of input\n and output files.\n\n Parameters\n -----------\n override_args : dict\n dictionary passed to the links\n\n \"\"\"\n self.args = extract_arguments(override_args, self.args)\n self._map_arguments(self.args)\n\n scratch_dir = self.args.get('scratch', None)\n if is_not_null(scratch_dir):\n self._file_stage = FileStageManager(scratch_dir, '.')\n for link in self._links.values():\n link._set_file_stage(self._file_stage)\n self._latch_file_info()"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef print_status(self, indent=\"\", recurse=False):\n print (\"%s%30s : %15s : %20s\" %\n (indent, \"Linkname\", \"Link Status\", \"Jobs Status\"))\n for link in self._links.values():\n if hasattr(link, 'check_status'):\n status_vect = 
link.check_status(\n stream=sys.stdout, no_wait=True, do_print=False)\n else:\n status_vect = None\n key = JobDetails.make_fullkey(link.full_linkname)\n link_status = JOB_STATUS_STRINGS[link.check_job_status(key)]\n if status_vect is None:\n jobs_status = JOB_STATUS_STRINGS[link.check_jobs_status()]\n else:\n jobs_status = status_vect\n print (\"%s%30s : %15s : %20s\" %\n (indent, link.linkname, link_status, jobs_status))\n if hasattr(link, 'print_status') and recurse:\n print (\"---------- %30s -----------\" % link.linkname)\n link.print_status(indent + \" \", recurse=True)\n print (\"------------------------------------------------\")", "response": "Print a summary of the job status for each Link in this Chain"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef print_summary(self, stream=sys.stdout, indent=\"\", recurse_level=2):\n Link.print_summary(self, stream, indent, recurse_level)\n if recurse_level > 0:\n recurse_level -= 1\n indent += \" \"\n for link in self._links.values():\n stream.write(\"\\n\")\n link.print_summary(stream, indent, recurse_level)", "response": "Print a summary of the activity done by this Chain."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef register_classes():\n Gtlink_exphpsun.register_class()\n Gtlink_suntemp.register_class()\n Gtexphpsun_SG.register_class()\n Gtsuntemp_SG.register_class()\n SunMoonChain.register_class()", "response": "Register these classes with the LinkFactory"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef build_job_configs(self, args):\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n mktime = args['mktimefilter']\n\n base_config = dict(nxpix=args['nxpix'],\n nypix=args['nypix'],\n binsz=args['binsz'])\n\n for comp in components:\n zcut = \"zmax%i\" % 
comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}')\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n irf_ver=NAME_FACTORY.irf_ver(),\n coordsys=comp.coordsys,\n mktime=mktime,\n fullpath=True)\n outfile = NAME_FACTORY.bexpcube(**name_keys)\n ltcube = NAME_FACTORY.ltcube(**name_keys)\n full_config = base_config.copy()\n full_config.update(dict(infile=ltcube,\n outfile=outfile,\n irfs=NAME_FACTORY.irfs(**name_keys),\n evtype=comp.evtype,\n emin=comp.emin,\n emax=comp.emax,\n enumbins=comp.enumbins,\n logfile=make_nfs_path(outfile.replace('.fits', '.log'))))\n job_configs[key] = full_config\n\n return job_configs", "response": "Hook to build job configurations from the arguments passed in."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nhooks to build job configurations from the given arguments", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n mktime = args['mktimefilter']\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}')\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=mktime,\n fullpath=True)\n outfile = NAME_FACTORY.bexpcube_sun(**name_keys)\n ltcube_sun = NAME_FACTORY.ltcube_sun(**name_keys)\n job_configs[key] = dict(infile=NAME_FACTORY.ltcube_sun(**name_keys),\n outfile=outfile,\n irfs=NAME_FACTORY.irfs(**name_keys),\n evtype=comp.evtype,\n emin=comp.emin,\n emax=comp.emax,\n enumbins=comp.enumbins,\n logfile=make_nfs_path(outfile.replace('.fits', '.log')))\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nhooking to build job configurations from the arguments passed in.", "response": "def build_job_configs(self, 
args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n # FIXME\n mktime = args['mktimefilter']\n\n for comp in components:\n for sourcekey in args['sourcekeys']:\n zcut = \"zmax%i\" % comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}') + \"_%s\" % sourcekey\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n irf_ver=NAME_FACTORY.irf_ver(),\n sourcekey=sourcekey,\n mktime=mktime,\n coordsys=comp.coordsys,\n fullpath=True)\n outfile = NAME_FACTORY.template_sunmoon(**name_keys)\n job_configs[key] = dict(expsun=NAME_FACTORY.bexpcube_sun(**name_keys),\n avgexp=NAME_FACTORY.bexpcube(**name_keys),\n sunprof=NAME_FACTORY.angprofile(**name_keys),\n cmap='none',\n outfile=outfile,\n irfs=NAME_FACTORY.irfs(**name_keys),\n evtype=comp.evtype,\n emin=comp.emin,\n emax=comp.emax,\n enumbins=comp.enumbins,\n logfile=outfile.replace('.fits', '.log'))\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nmapping from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, input_dict):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n\n config_yaml = input_dict['config']\n config_dict = load_yaml(config_yaml)\n\n data = config_dict.get('data')\n comp = config_dict.get('comp')\n sourcekeys = config_dict.get('sourcekeys')\n\n mktimefilter = config_dict.get('mktimefilter')\n\n self._set_link('expcube2', Gtexpcube2wcs_SG,\n comp=comp, data=data,\n mktimefilter=mktimefilter)\n\n self._set_link('exphpsun', Gtexphpsun_SG,\n comp=comp, data=data,\n mktimefilter=mktimefilter)\n\n self._set_link('suntemp', Gtsuntemp_SG,\n comp=comp, data=data,\n mktimefilter=mktimefilter,\n sourcekeys=sourcekeys)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function 
to\nreturn the information about a particular sub - component", "response": "def get_component_info(self, comp):\n \"\"\"Return the information about sub-component specific to a particular data selection\n\n Parameters\n ----------\n\n comp : `binning.Component` object\n Specifies the sub-component\n\n Returns `ModelComponentInfo` object\n \"\"\"\n if self.components is None:\n raise ValueError(\n 'Model component %s does not have sub-components' % self.sourcekey)\n if self.moving:\n comp_key = \"zmax%i\" % (comp.zmax)\n elif self.selection_dependent:\n comp_key = comp.make_key('{ebin_name}_{evtype_name}')\n else:\n raise ValueError(\n 'Model component %s is not moving or selection dependent' % self.sourcekey)\n return self.components[comp_key]"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef add_component_info(self, compinfo):\n if self.components is None:\n self.components = {}\n self.components[compinfo.comp_key] = compinfo", "response": "Add sub - component specific information to a particular data selection."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef clone_and_merge_sub(self, key):\n new_comp = copy.deepcopy(self)\n #sub_com = self.components[key]\n new_comp.components = None\n new_comp.comp_key = key\n return new_comp", "response": "Clones self and merges clone with sub - component specific information."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef add_columns(t0, t1):\n\n for colname in t1.colnames:\n col = t1.columns[colname]\n if colname in t0.columns:\n continue\n new_col = Column(name=col.name, length=len(t0), dtype=col.dtype) # ,\n # shape=col.shape)\n t0.add_column(new_col)", "response": "Add columns of table t1 to table t0."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nperform a join of two tables.", "response": 
"def join_tables(left, right, key_left, key_right,\n cols_right=None):\n \"\"\"Perform a join of two tables.\n\n Parameters\n ----------\n left : `~astropy.Table`\n Left table for join.\n\n right : `~astropy.Table`\n Right table for join.\n\n key_left : str\n Key used to match elements from ``left`` table.\n\n key_right : str\n Key used to match elements from ``right`` table.\n\n cols_right : list \n Subset of columns from ``right`` table that will be appended\n to joined table.\n\n \"\"\"\n right = right.copy()\n\n if cols_right is None:\n cols_right = right.colnames\n else:\n cols_right = [c for c in cols_right if c in right.colnames]\n\n if key_left != key_right:\n right[key_right].name = key_left\n\n if key_left not in cols_right:\n cols_right += [key_left]\n\n out = join(left, right[cols_right], keys=key_left,\n join_type='left')\n\n for col in out.colnames:\n if out[col].dtype.kind in ['S', 'U']:\n out[col].fill_value = ''\n elif out[col].dtype.kind in ['i']:\n out[col].fill_value = 0\n else:\n out[col].fill_value = np.nan\n\n return out.filled()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nstripping whitespace from string columns.", "response": "def strip_columns(tab):\n \"\"\"Strip whitespace from string columns.\"\"\"\n for colname in tab.colnames:\n if tab[colname].dtype.kind in ['S', 'U']:\n tab[colname] = np.core.defchararray.strip(tab[colname])"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef row_to_dict(row):\n o = {}\n for colname in row.colnames:\n\n if isinstance(row[colname], np.string_) and row[colname].dtype.kind in ['S', 'U']:\n o[colname] = str(row[colname])\n else:\n o[colname] = row[colname]\n\n return o", "response": "Convert a table row to a dictionary."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nhooking to build job configurations from the given arguments.", "response": "def build_job_configs(self, args):\n 
\"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n ret_dict = make_catalog_comp_dict(sources=args['library'], basedir='.')\n comp_info_dict = ret_dict['comp_info_dict']\n\n for split_ver, split_dict in comp_info_dict.items():\n for source_key, source_dict in split_dict.items():\n full_key = \"%s_%s\" % (split_ver, source_key)\n merged_name = \"%s_%s\" % (source_dict.catalog_info.catalog_name, source_key)\n if source_dict.model_type != 'CompositeSource':\n continue\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = \"%s_%s\" % (full_key, comp.make_key('{ebin_name}_{evtype_name}'))\n name_keys = dict(zcut=zcut,\n sourcekey=full_key,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n mktime='none',\n irf_ver=NAME_FACTORY.irf_ver())\n nested_name_keys = dict(zcut=zcut,\n sourcekey=source_dict.catalog_info.catalog_name,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n mktime='none',\n irf_ver=NAME_FACTORY.irf_ver())\n outfile = NAME_FACTORY.srcmaps(**name_keys)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[key] = dict(srcmaps=NAME_FACTORY.srcmaps(**nested_name_keys),\n expcube=NAME_FACTORY.ltcube(**name_keys),\n irfs=NAME_FACTORY.irfs(**name_keys),\n bexpmap=NAME_FACTORY.bexpcube(**name_keys),\n srcmdl=NAME_FACTORY.srcmdl_xml(**name_keys),\n merged=merged_name,\n outfile=outfile,\n outxml=NAME_FACTORY.nested_srcmdl_xml(**name_keys),\n logfile=logfile)\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef check_log(logfile, exited='Exited with exit code',\n successful='Successfully completed', exists=True):\n \"\"\" Often logfile doesn't exist because the job hasn't begun\n to run. 
It is unclear what you want to do in that case...\n\n Parameters\n ----------\n logfile : str\n String with path to logfile\n exists : bool\n Is the logfile required to exist\n exited : str\n String in logfile used to determine if a job exited.\n successful : str\n String in logfile used to determine if a job succeeded.\n\n \"\"\"\n if not os.path.exists(logfile):\n return not exists\n\n if exited in open(logfile).read():\n return 'Exited'\n elif successful in open(logfile).read():\n return 'Successful'\n else:\n return 'None'", "response": "Check if a log file exists and if it exists return None if it doesn t."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef dispatch_job(jobname, exe, args, opts, batch_opts, dry_run=True):\n\n batch_opts.setdefault('W', 300)\n batch_opts.setdefault('R', 'rhel60 && scratch > 10')\n\n cmd_opts = ''\n for k, v in opts.items():\n\n if isinstance(v, list):\n cmd_opts += ' '.join(['--%s=%s' % (k, t) for t in v])\n elif isinstance(v, bool) and v:\n cmd_opts += ' --%s ' % (k)\n elif isinstance(v, bool):\n continue\n elif v is not None:\n cmd_opts += ' --%s=\\\"%s\\\" ' % (k, v)\n\n bash_script = \"{exe} {args} {opts}\"\n scriptexe = jobname + '.sh'\n with open(os.path.join(scriptexe), 'wt') as f:\n f.write(bash_script.format(exe=exe, args=' '.join(args),\n opts=cmd_opts))\n\n batch_optstr = parse_lsf_opts(**batch_opts)\n batch_cmd = 'bsub %s ' % (batch_optstr)\n batch_cmd += ' bash %s' % scriptexe\n print(batch_cmd)\n if not dry_run:\n os.system(batch_cmd)", "response": "Dispatch an LSF job."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _make_input_file_list(binnedfile, num_files):\n outdir_base = os.path.abspath(os.path.dirname(binnedfile))\n outbasename = os.path.basename(binnedfile)\n filelist = \"\"\n for i in range(num_files):\n split_key = \"%06i\" % i\n output_dir = os.path.join(outdir_base, split_key)\n filepath = 
os.path.join(output_dir,\n outbasename.replace('.fits', '_%s.fits' % split_key))\n filelist += ' %s' % filepath\n return filelist", "response": "Make the list of input files for a particular energy bin X psf type"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n comp_file = args.get('comp', None)\n datafile = args.get('data', None)\n do_ltsum = args.get('do_ltsum', False)\n\n NAME_FACTORY.update_base_dict(datafile)\n outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')\n num_files = args.get('nfiles', 96)\n\n self.comp_dict = yaml.safe_load(open(comp_file))\n coordsys = self.comp_dict.pop('coordsys')\n\n for key_e, comp_e in sorted(self.comp_dict.items()):\n\n if 'mktimefilters' in comp_e:\n mktimelist = comp_e['mktimefilters']\n else:\n mktimelist = ['none']\n\n if 'evtclasses' in comp_e:\n evtclasslist_vals = comp_e['evtclasses']\n else:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n\n for mktimekey in mktimelist:\n zcut = \"zmax%i\" % comp_e['zmax']\n kwargs_mktime = dict(zcut=zcut,\n ebin=key_e,\n psftype='ALL',\n coordsys=coordsys,\n mktime=mktimekey)\n\n if do_ltsum:\n ltsum_listfile = 'ltsumlist_%s_%s' % (key_e, mktimekey)\n ltsum_outfile = 'ltsum_%s_%s' % (key_e, mktimekey)\n linkname = 'ltsum_%s_%s' % (key_e, mktimekey)\n self._set_link(likname, Gtlink_ltsum,\n infile1=ltsum_listfile,\n infile2=None,\n outfile=ltsum_outfile,\n logfile=os.path.join(outdir_base, \"%s.log\" % linkname))\n\n for evtclassval in evtclasslist_vals:\n for psf_type in sorted(comp_e['psf_types'].keys()):\n fullkey = \"%s_%s_%s_%s\"%(key_e, mktimekey, evtclassval, psf_type)\n linkname = 'coadd_%s' % (fullkey)\n kwargs_bin = kwargs_mktime.copy()\n kwargs_bin['psftype'] = 
psf_type\n kwargs_bin['evclass'] = evtclassval\n ccube_name =\\\n os.path.basename(NAME_FACTORY.ccube(**kwargs_bin))\n outputfile = os.path.join(outdir_base, ccube_name)\n args = _make_input_file_list(outputfile, num_files)\n \n self._set_link(linkname,\n Link_FermipyCoadd,\n args=args,\n output=outputfile,\n logfile=os.path.join(outdir_base, \"%s.log\" % linkname))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nhook to build job configurations from a YAML file.", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n\n datafile = args['data']\n if datafile is None or datafile == 'None':\n return job_configs\n NAME_FACTORY.update_base_dict(args['data'])\n outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')\n\n inputfiles = create_inputlist(args['ft1file'])\n num_files = len(inputfiles)\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n\n mktimelist = copy.copy(comp.mktimefilters)\n if not mktimelist:\n mktimelist.append('none')\n evtclasslist_keys = copy.copy(comp.evtclasses)\n if not evtclasslist_keys:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n else:\n evtclasslist_vals = copy.copy(evtclasslist_keys)\n\n for mktimekey in mktimelist:\n for evtclassval in evtclasslist_vals:\n fullkey = comp.make_key(\n '%s_%s_{ebin_name}_%s_{evtype_name}' %\n (evtclassval, zcut, mktimekey))\n\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=mktimekey,\n evclass=evtclassval,\n fullpath=True)\n\n ccube_name = os.path.basename(NAME_FACTORY.ccube(**name_keys))\n outfile = os.path.join(outdir_base, ccube_name)\n infiles = _make_input_file_list(outfile, num_files)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[fullkey] = dict(args=infiles,\n 
output=outfile,\n logfile=logfile)\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef create_from_flux(cls, params, emin, emax, flux, scale=1.0):\n params = params.copy()\n params[0] = 1.0\n params[0] = flux / cls.eval_flux(emin, emax, params, scale=scale)\n return cls(params, scale)", "response": "Create a spectral function instance given its flux."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create_from_eflux(cls, params, emin, emax, eflux, scale=1.0):\n params = params.copy()\n params[0] = 1.0\n params[0] = eflux / cls.eval_eflux(emin, emax, params, scale=scale)\n return cls(params, scale)", "response": "Create a spectral function instance given its energy flux."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nfast numerical integration method using mid - point rule.", "response": "def _integrate(cls, fn, emin, emax, params, scale=1.0, extra_params=None,\n npt=20):\n \"\"\"Fast numerical integration method using mid-point rule.\"\"\"\n\n emin = np.expand_dims(emin, -1)\n emax = np.expand_dims(emax, -1)\n\n params = copy.deepcopy(params)\n for i, p in enumerate(params):\n params[i] = np.expand_dims(params[i], -1)\n\n xedges = np.linspace(0.0, 1.0, npt + 1)\n logx_edge = np.log(emin) + xedges * (np.log(emax) - np.log(emin))\n logx = 0.5 * (logx_edge[..., 1:] + logx_edge[..., :-1])\n xw = np.exp(logx_edge[..., 1:]) - np.exp(logx_edge[..., :-1])\n dnde = fn(np.exp(logx), params, scale, extra_params)\n return np.sum(dnde * xw, axis=-1)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nevaluates E times differential flux.", "response": "def ednde(self, x, params=None):\n \"\"\"Evaluate E times differential flux.\"\"\"\n params = self.params if params is None else params\n return np.squeeze(self.eval_ednde(x, params, self.scale,\n self.extra_params))"} 
{"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef e2dnde(self, x, params=None):\n params = self.params if params is None else params\n return np.squeeze(self.eval_e2dnde(x, params, self.scale,\n self.extra_params))", "response": "Evaluate E^2 times differential flux."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef dnde_deriv(self, x, params=None):\n params = self.params if params is None else params\n return np.squeeze(self.eval_dnde_deriv(x, params, self.scale,\n self.extra_params))", "response": "Evaluate derivative of the differential flux with respect to E."} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nevaluating derivative of E times differential flux with respect to E.", "response": "def ednde_deriv(self, x, params=None):\n \"\"\"Evaluate derivative of E times differential flux with respect to\n E.\"\"\"\n params = self.params if params is None else params\n return np.squeeze(self.eval_ednde_deriv(x, params, self.scale,\n self.extra_params))"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nevaluate derivative of E^2 times differential flux with respect to E.", "response": "def e2dnde_deriv(self, x, params=None):\n \"\"\"Evaluate derivative of E^2 times differential flux with\n respect to E.\"\"\"\n params = self.params if params is None else params\n return np.squeeze(self.eval_e2dnde_deriv(x, params, self.scale,\n self.extra_params))"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef flux(self, emin, emax, params=None):\n params = self.params if params is None else params\n return np.squeeze(self.eval_flux(emin, emax, params, self.scale,\n self.extra_params))", "response": "Evaluate the integral flux."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function 
doing\ndef eflux(self, emin, emax, params=None):\n params = self.params if params is None else params\n return np.squeeze(self.eval_eflux(emin, emax, params, self.scale,\n self.extra_params))", "response": "Evaluate the integral energy flux."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ntest this source for spatial extension with the likelihood ratio method.", "response": "def extension(self, name, **kwargs):\n \"\"\"Test this source for spatial extension with the likelihood\n ratio method (TS_ext). This method will substitute an\n extended spatial model for the given source and perform a\n one-dimensional scan of the spatial extension parameter over\n the range specified with the width parameters. The 1-D\n profile likelihood is then used to compute the best-fit value,\n upper limit, and TS for extension. The nuisance parameters\n that will be simultaneously fit when performing the spatial\n scan can be controlled with the ``fix_shape``,\n ``free_background``, and ``free_radius`` options. By default\n the position of the source will be fixed to its current\n position. A simultaneous fit to position and extension can be\n performed by setting ``fit_position`` to True.\n\n Parameters\n ----------\n name : str\n Source name.\n\n {options}\n\n optimizer : dict\n Dictionary that overrides the default optimizer settings.\n\n Returns\n -------\n extension : dict\n Dictionary containing results of the extension analysis. 
The same\n dictionary is also saved to the dictionary of this source under\n 'extension'.\n \"\"\"\n timer = Timer.create(start=True)\n name = self.roi.get_source_by_name(name).name\n\n schema = ConfigSchema(self.defaults['extension'],\n optimizer=self.defaults['optimizer'])\n schema.add_option('prefix', '')\n schema.add_option('outfile', None, '', str)\n config = utils.create_dict(self.config['extension'],\n optimizer=self.config['optimizer'])\n config = schema.create_config(config, **kwargs)\n\n self.logger.info('Running extension fit for %s', name)\n\n free_state = FreeParameterState(self)\n ext = self._extension(name, **config)\n free_state.restore()\n\n self.logger.info('Finished extension fit.')\n\n if config['make_plots']:\n self._plotter.make_extension_plots(ext, self.roi,\n prefix=config['prefix'])\n\n outfile = config.get('outfile', None)\n if outfile is None:\n outfile = utils.format_filename(self.workdir, 'ext',\n prefix=[config['prefix'],\n name.lower().replace(' ', '_')])\n else:\n outfile = os.path.join(self.workdir,\n os.path.splitext(outfile)[0])\n\n if config['write_fits']:\n self._make_extension_fits(ext, outfile + '.fits')\n\n if config['write_npy']:\n np.save(outfile + '.npy', dict(ext))\n\n self.logger.info('Execution time: %.2f s', timer.elapsed_time)\n return ext"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef plotNLL_v_Flux(nll, fluxType, nstep=25, xlims=None):\n import matplotlib.pyplot as plt\n\n if xlims is None:\n xmin = nll.interp.xmin\n xmax = nll.interp.xmax\n else:\n xmin = xlims[0]\n xmax = xlims[1]\n\n y1 = nll.interp(xmin)\n y2 = nll.interp(xmax)\n\n ymin = min(y1, y2, 0.0)\n ymax = max(y1, y2, 0.5)\n\n xvals = np.linspace(xmin, xmax, nstep)\n yvals = nll.interp(xvals)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_xlim((xmin, xmax))\n ax.set_ylim((ymin, ymax))\n\n ax.set_xlabel(NORM_LABEL[fluxType])\n ax.set_ylabel(r'$-\\Delta 
\\log\\mathcal{L}$')\n ax.plot(xvals, yvals)\n return fig, ax", "response": "Plot the negative log - likelihood as a function of normalization"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nplots a color plot of the Subject log - likelihood v.", "response": "def plotCastro_base(castroData, ylims,\n xlabel, ylabel, nstep=25, zlims=None, global_min=False):\n \"\"\" Make a color plot (castro plot) of the \n log-likelihood as a function of \n energy and flux normalization\n\n castroData : A CastroData_Base object, with the \n log-likelihood v. normalization for each energy bin\n ylims : y-axis limits\n xlabel : x-axis title\n ylabel : y-axis title\n nstep : Number of y-axis steps to plot for each energy bin\n zlims : z-axis limits\n global_min : Plot the log-likelihood w.r.t. the global min.\n\n returns fig,ax,im,ztmp which are matplotlib figure, axes and image objects\n \"\"\"\n import matplotlib.pyplot as plt\n\n ymin = ylims[0]\n ymax = ylims[1]\n if zlims is None:\n zmin = -10\n zmax = 0.\n else:\n zmin = zlims[0]\n zmax = zlims[1]\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_ylim((ymin, ymax))\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n normVals = np.logspace(np.log10(ymin), np.log10(ymax), nstep)\n \n\n ztmp = []\n for i in range(castroData.nx):\n ztmp.append(castroData[i].interp(normVals))\n\n ztmp = np.asarray(ztmp).T\n ztmp *= -1.\n ztmp = np.where(ztmp < zmin, np.nan, ztmp)\n\n if global_min:\n global_offset = castroData.nll_offsets.min()\n offsets = global_offset - castroData.nll_offsets\n ztmp += offsets\n\n cmap = plt.get_cmap('jet_r')\n\n\n xedge = castroData.x_edges()\n ax.set_xlim((xedge[0], xedge[-1]))\n\n im = ax.pcolormesh(xedge, normVals, ztmp, \n vmin=zmin, vmax=zmax, cmap=cmap, linewidth=0)\n\n #cax = ax\n #cbar = plt.colorbar(im)\n #cbar.set_label(r\"$\\Delta \\log \\mathcal{L}$\")\n\n cax, cbar = make_colorbar(fig, ax, im, 
(zmin, zmax))\n\n #ax.set_ylim()\n #plt.gca().set_yscale('log')\n #plt.gca().set_xscale('log')\n #plt.gca().set_xlim(sed['e_min'][0], sed['e_max'][-1])\n #\n #cax, cbar = make_colorbar(fig, ax, im, (zmin, zmax))\n # cbar = fig.colorbar(im, ticks=np.arange(zmin,zmax),\n # fraction=0.10,panchor=(1.05,0.5))\n #cbar.set_label(r'$\\Delta \\log\\mathcal{L}$')\n #cax = None\n return fig, ax, im, ztmp, cax, cbar"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nmaking a color plot of the delta log - likelihood v.", "response": "def plotCastro(castroData, ylims, nstep=25, zlims=None):\n \"\"\" Make a color plot (castro plot) of the \n delta log-likelihood as a function of \n energy and flux normalization\n\n castroData : A CastroData object, with the \n log-likelihood v. normalization for each energy bin\n ylims : y-axis limits\n nstep : Number of y-axis steps to plot for each energy bin\n zlims : z-axis limits\n\n returns fig,ax,im,ztmp which are matplotlib figure, axes and image objects\n \"\"\"\n xlabel = \"Energy [MeV]\"\n ylabel = NORM_LABEL[castroData.norm_type]\n return plotCastro_base(castroData, ylims,\n xlabel, ylabel, nstep, zlims)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nplot a SED plot of the negative log - likelihood vs. flux of each energy bin.", "response": "def plotSED(castroData, ylims, TS_thresh=4.0, errSigma=1.0, specVals=[]):\n \"\"\" Make a color plot (castro plot) of the (negative) log-likelihood \n as a function of energy and flux normalization\n\n castroData : A CastroData object, with the \n log-likelihood v. 
normalization for each energy bin\n ylims : y-axis limits\n TS_thresh : TS value above with to plot a point, \n rather than an upper limit\n errSigma : Number of sigma to use for error bars\n specVals : List of spectra to add to plot\n returns fig,ax which are matplotlib figure and axes objects\n \"\"\"\n import matplotlib.pyplot as plt\n\n xmin = castroData.refSpec.ebins[0]\n xmax = castroData.refSpec.ebins[-1]\n ymin = ylims[0]\n ymax = ylims[1]\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim((xmin, xmax))\n ax.set_ylim((ymin, ymax))\n\n ax.set_xlabel(\"Energy [GeV]\")\n ax.set_ylabel(NORM_LABEL[castroData.norm_type])\n\n plotSED_OnAxis(ax, castroData, TS_thresh, errSigma)\n\n for spec in specVals:\n ax.loglog(castroData.refSpec.eref, spec)\n pass\n\n return fig, ax"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncomparing two SEDs castroData1: A CastroData object, with the log-likelihood v. normalization for each energy bin castroData2: A CastroData object, with the log-likelihood v. normalization for each energy bin ylims : y-axis limits TS_thresh : TS value above with to plot a point, rather than an upper limit errSigma : Number of sigma to use for error bars specVals : List of spectra to add to plot returns fig,ax which are matplotlib figure and axes objects", "response": "def compare_SED(castroData1, castroData2, ylims, TS_thresh=4.0,\n errSigma=1.0, specVals=[]):\n \"\"\" Compare two SEDs \n\n castroData1: A CastroData object, with the \n log-likelihood v. normalization for each energy bin\n castroData2: A CastroData object, with the \n log-likelihood v. 
normalization for each energy bin\n ylims : y-axis limits\n TS_thresh : TS value above with to plot a point, \n rather than an upper limit\n errSigma : Number of sigma to use for error bars\n specVals : List of spectra to add to plot\n returns fig,ax which are matplotlib figure and axes objects\n \"\"\"\n import matplotlib.pyplot as plt\n\n xmin = min(castroData1.refSpec.ebins[0], castroData2.refSpec.ebins[0])\n xmax = max(castroData1.refSpec.ebins[-1], castroData2.refSpec.ebins[-1])\n ymin = ylims[0]\n ymax = ylims[1]\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim((xmin, xmax))\n ax.set_ylim((ymin, ymax))\n\n ax.set_xlabel(\"Energy [GeV]\")\n ax.set_ylabel(NORM_LABEL[castroData1.norm_type])\n\n plotSED_OnAxis(ax, castroData1, TS_thresh, errSigma,\n colorLim='blue', colorPoint='blue')\n plotSED_OnAxis(ax, castroData2, TS_thresh, errSigma,\n colorLim='red', colorPoint='red')\n\n for spec in specVals:\n ax.loglog(castroData1.refSpec.eref, spec)\n\n return fig, ax"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nbuilding and return the information about the Galprop rings", "response": "def make_ring_dicts(**kwargs):\n \"\"\"Build and return the information about the Galprop rings\n \"\"\"\n library_yamlfile = kwargs.get('library', 'models/library.yaml')\n gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))\n if library_yamlfile is None or library_yamlfile == 'None':\n return gmm\n diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(library_yamlfile)\n for diffuse_value in diffuse_comps.values():\n if diffuse_value is None:\n continue\n if diffuse_value['model_type'] != 'galprop_rings':\n continue\n versions = diffuse_value['versions']\n for version in versions:\n gmm.make_ring_dict(version)\n return gmm"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nbuilds and return the information about the diffuse 
components", "response": "def make_diffuse_comp_info_dict(**kwargs):\n \"\"\"Build and return the information about the diffuse components\n \"\"\"\n library_yamlfile = kwargs.pop('library', 'models/library.yaml')\n components = kwargs.pop('components', None)\n if components is None:\n comp_yamlfile = kwargs.pop('comp', 'config/binning.yaml')\n components = Component.build_from_yamlfile(comp_yamlfile)\n gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))\n dmm = kwargs.get('DiffuseModelManager', DiffuseModelManager(**kwargs))\n if library_yamlfile is None or library_yamlfile == 'None':\n diffuse_comps = {}\n else:\n diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(\n library_yamlfile)\n diffuse_comp_info_dict = dmm.make_diffuse_comp_info_dict(\n diffuse_comps, components)\n for diffuse_value in diffuse_comps.values():\n if diffuse_value is None:\n continue\n if diffuse_value['model_type'] != 'galprop_rings':\n continue\n versions = diffuse_value['versions']\n for version in versions:\n galprop_dict = gmm.make_diffuse_comp_info_dict(version)\n diffuse_comp_info_dict.update(galprop_dict)\n\n return dict(comp_info_dict=diffuse_comp_info_dict,\n GalpropMapManager=gmm,\n DiffuseModelManager=dmm)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreading the yaml file for a partiuclar galprop key", "response": "def read_galprop_rings_yaml(self, galkey):\n \"\"\" Read the yaml file for a partiuclar galprop key\n \"\"\"\n galprop_rings_yaml = self._name_factory.galprop_rings_yaml(galkey=galkey,\n fullpath=True)\n galprop_rings = yaml.safe_load(open(galprop_rings_yaml))\n return galprop_rings"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmake the name of a gasmap file for a single ring", "response": "def make_ring_filename(self, source_name, ring, galprop_run):\n \"\"\" Make the name of a gasmap file for a single ring\n\n Parameters\n ----------\n\n source_name : str\n The galprop 
component, used to define path to gasmap files\n ring : int\n The ring index\n galprop_run : str\n String identifying the galprop parameters\n \"\"\"\n format_dict = self.__dict__.copy()\n format_dict['sourcekey'] = self._name_factory.galprop_ringkey(source_name=source_name,\n ringkey=\"ring_%i\" % ring)\n format_dict['galprop_run'] = galprop_run\n return self._name_factory.galprop_gasmap(**format_dict)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmakes the name of a merged gasmap file for a set of merged rings", "response": "def make_merged_name(self, source_name, galkey, fullpath):\n \"\"\" Make the name of a gasmap file for a set of merged rings\n\n Parameters\n ----------\n\n source_name : str\n The galprop component, used to define path to gasmap files\n galkey : str\n A short key identifying the galprop parameters\n fullpath : bool\n Return the full path name\n \"\"\"\n format_dict = self.__dict__.copy()\n format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(source_name=source_name,\n galpropkey=galkey)\n format_dict['fullpath'] = fullpath\n return self._name_factory.merged_gasmap(**format_dict)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef make_xml_name(self, source_name, galkey, fullpath):\n format_dict = self.__dict__.copy()\n format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(source_name=source_name,\n galpropkey=galkey)\n format_dict['fullpath'] = fullpath\n return self._name_factory.srcmdl_xml(**format_dict)", "response": "Make the name of an xml file for a set of merged rings\n "} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef make_ring_filelist(self, sourcekeys, rings, galprop_run):\n flist = []\n for sourcekey in sourcekeys:\n for ring in rings:\n flist += [self.make_ring_filename(sourcekey,\n ring, galprop_run)]\n return flist", "response": 
"Make a list of all the template files for a merged component"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nmaking a dictionary mapping the merged component names to list of template files", "response": "def make_ring_dict(self, galkey):\n \"\"\" Make a dictionary mapping the merged component names to list of template files\n\n Parameters\n ----------\n\n galkey : str\n Unique key for this ring dictionary\n\n Returns `model_component.GalpropMergedRingInfo`\n \"\"\"\n galprop_rings = self.read_galprop_rings_yaml(galkey)\n galprop_run = galprop_rings['galprop_run']\n ring_limits = galprop_rings['ring_limits']\n comp_dict = galprop_rings['diffuse_comp_dict']\n remove_rings = galprop_rings.get('remove_rings', [])\n ring_dict = {}\n nring = len(ring_limits) - 1\n for source_name, source_value in comp_dict.items():\n base_dict = dict(source_name=source_name,\n galkey=galkey,\n galprop_run=galprop_run)\n for iring in range(nring):\n sourcekey = \"%s_%i\" % (source_name, iring)\n if sourcekey in remove_rings:\n continue\n full_key = \"%s_%s\" % (sourcekey, galkey)\n rings = range(ring_limits[iring], ring_limits[iring + 1])\n base_dict.update(dict(ring=iring,\n sourcekey=sourcekey,\n files=self.make_ring_filelist(source_value,\n rings, galprop_run),\n merged_gasmap=self.make_merged_name(sourcekey,\n galkey, False)))\n ring_dict[full_key] = GalpropMergedRingInfo(**base_dict)\n self._ring_dicts[galkey] = ring_dict\n return ring_dict"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nmake the information about a single merged component.", "response": "def make_diffuse_comp_info(self, merged_name, galkey):\n \"\"\" Make the information about a single merged component\n\n Parameters\n ----------\n\n merged_name : str\n The name of the merged component\n galkey : str\n A short key identifying the galprop parameters\n\n Returns `Model_component.ModelComponentInfo`\n \"\"\"\n kwargs = dict(source_name=merged_name,\n 
source_ver=galkey,\n model_type='MapCubeSource',\n Spatial_Filename=self.make_merged_name(\n merged_name, galkey, fullpath=True),\n srcmdl_name=self.make_xml_name(merged_name, galkey, fullpath=True))\n return MapCubeComponentInfo(**kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nmake a dictionary maping from merged component to information about that component", "response": "def make_diffuse_comp_info_dict(self, galkey):\n \"\"\" Make a dictionary maping from merged component to information about that component\n\n Parameters\n ----------\n\n galkey : str\n A short key identifying the galprop parameters\n \"\"\"\n galprop_rings = self.read_galprop_rings_yaml(galkey)\n ring_limits = galprop_rings.get('ring_limits')\n comp_dict = galprop_rings.get('diffuse_comp_dict')\n remove_rings = galprop_rings.get('remove_rings', [])\n diffuse_comp_info_dict = {}\n nring = len(ring_limits) - 1\n for source_key in sorted(comp_dict.keys()):\n for iring in range(nring):\n source_name = \"%s_%i\" % (source_key, iring)\n if source_name in remove_rings:\n continue\n full_key = \"%s_%s\" % (source_name, galkey)\n diffuse_comp_info_dict[full_key] =\\\n self.make_diffuse_comp_info(source_name, galkey)\n self._diffuse_comp_info_dicts[galkey] = diffuse_comp_info_dict\n return diffuse_comp_info_dict"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef make_template_name(self, model_type, sourcekey):\n format_dict = self.__dict__.copy()\n format_dict['sourcekey'] = sourcekey\n if model_type == 'IsoSource':\n return self._name_factory.spectral_template(**format_dict)\n elif model_type in ['MapCubeSource', 'SpatialMap']:\n return self._name_factory.diffuse_template(**format_dict)\n else:\n raise ValueError(\"Unexpected model_type %s\" % model_type)", "response": "Makes the name of a template file for particular component."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nmaking the 
name of an xml file for a model definition of a single component.", "response": "def make_xml_name(self, sourcekey):\n \"\"\" Make the name of an xml file for a model definition of a single component\n\n Parameters\n ----------\n\n sourcekey : str\n Key to identify this component\n \"\"\"\n format_dict = self.__dict__.copy()\n format_dict['sourcekey'] = sourcekey\n return self._name_factory.srcmdl_xml(**format_dict)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nmaking a dictionary mapping the merged component names to list of template files.", "response": "def make_diffuse_comp_info(self, source_name, source_ver, diffuse_dict,\n components=None, comp_key=None):\n \"\"\" Make a dictionary mapping the merged component names to list of template files\n\n Parameters\n ----------\n\n source_name : str\n Name of the source\n source_ver : str\n Key identifying the version of the source\n diffuse_dict : dict\n Information about this component\n comp_key : str\n Used when we need to keep track of sub-components, i.e.,\n for moving and selection dependent sources.\n\n Returns `model_component.ModelComponentInfo` or\n `model_component.IsoComponentInfo`\n \"\"\"\n model_type = diffuse_dict['model_type']\n sourcekey = '%s_%s' % (source_name, source_ver)\n if comp_key is None:\n template_name = self.make_template_name(model_type, sourcekey)\n srcmdl_name = self.make_xml_name(sourcekey)\n else:\n template_name = self.make_template_name(\n model_type, \"%s_%s\" % (sourcekey, comp_key))\n srcmdl_name = self.make_xml_name(\"%s_%s\" % (sourcekey, comp_key))\n\n template_name = self._name_factory.fullpath(localpath=template_name)\n srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name)\n\n kwargs = dict(source_name=source_name,\n source_ver=source_ver,\n model_type=model_type,\n srcmdl_name=srcmdl_name,\n components=components,\n comp_key=comp_key)\n kwargs.update(diffuse_dict)\n if model_type == 'IsoSource':\n kwargs['Spectral_Filename'] = 
template_name\n return IsoComponentInfo(**kwargs)\n elif model_type == 'MapCubeSource':\n kwargs['Spatial_Filename'] = template_name\n return MapCubeComponentInfo(**kwargs)\n elif model_type == 'SpatialMap':\n kwargs['Spatial_Filename'] = template_name\n return SpatialMapComponentInfo(**kwargs)\n else:\n raise ValueError(\"Unexpected model type %s\" % model_type)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nmaking a dictionary mapping from diffuse component to model component info.", "response": "def make_diffuse_comp_info_dict(self, diffuse_sources, components):\n \"\"\" Make a dictionary maping from diffuse component to information about that component\n\n Parameters\n ----------\n\n diffuse_sources : dict\n Dictionary with diffuse source defintions\n components : dict\n Dictionary with event selection defintions,\n needed for selection depenedent diffuse components\n\n Returns\n -------\n\n ret_dict : dict\n Dictionary mapping sourcekey to `model_component.ModelComponentInfo`\n \"\"\"\n ret_dict = {}\n for key, value in diffuse_sources.items():\n if value is None:\n continue\n model_type = value.get('model_type', 'MapCubeSource')\n if model_type in ['galprop_rings', 'catalog']:\n continue\n selection_dependent = value.get('selection_dependent', False)\n moving = value.get('moving', False)\n versions = value.get('versions', [])\n for version in versions:\n # sourcekey = self._name_factory.sourcekey(source_name=key,\n # source_ver=version)\n comp_dict = None\n if selection_dependent:\n # For selection dependent diffuse sources we need to split\n # by binning component\n comp_dict = {}\n for comp in components:\n comp_key = comp.make_key('{ebin_name}_{evtype_name}')\n comp_dict[comp_key] = self.make_diffuse_comp_info(\n key, version, value, None, comp_key)\n elif moving:\n # For moving diffuse sources we need to split by zmax cut\n comp_dict = {}\n zmax_dict = {}\n for comp in components:\n zmax_dict[int(comp.zmax)] = True\n 
zmax_list = sorted(zmax_dict.keys())\n for zmax in zmax_list:\n comp_key = \"zmax%i\" % (zmax)\n comp_dict[comp_key] = self.make_diffuse_comp_info(\n key, version, value, None, comp_key)\n\n comp_info = self.make_diffuse_comp_info(\n key, version, value, comp_dict)\n ret_dict[comp_info.sourcekey] = comp_info\n\n self._diffuse_comp_info_dict.update(ret_dict)\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_unique_match(table, colname, value):\n # FIXME, This is here for python 3.5, where astropy is now returning bytes\n # instead of str\n if table[colname].dtype.kind in ['S', 'U']:\n mask = table[colname].astype(str) == value\n else:\n mask = table[colname] == value\n\n if mask.sum() != 1:\n raise KeyError(\"%i rows in column %s match value %s\" %\n (mask.sum(), colname, value))\n return np.argmax(mask)", "response": "Get the row matching value for a particular column."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef latch_file_info(self, args):\n self.file_dict.clear()\n for key, val in self.file_args.items():\n try:\n file_path = args[key]\n if file_path is None:\n continue\n # 'args' is special\n if key[0:4] == 'args':\n if isinstance(file_path, list):\n tokens = file_path\n elif isinstance(file_path, str):\n tokens = file_path.split()\n else:\n raise TypeError(\n \"Args has type %s, expect list or str\" % type(file_path))\n for token in tokens:\n self.file_dict[token.replace('.gz', '')] = val\n else:\n self.file_dict[file_path.replace('.gz', '')] = val\n except KeyError:\n pass", "response": "Extract the file paths from a set of arguments and store them in self. 
file_dict."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nupdates self with values from a dictionary mapping file path to FileFlags enum", "response": "def update(self, file_dict):\n \"\"\"Update self with values from a dictionary\n mapping file path [str] to `FileFlags` enum \"\"\"\n for key, val in file_dict.items():\n if key in self.file_dict:\n self.file_dict[key] |= val\n else:\n self.file_dict[key] = val"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef input_files(self):\n ret_list = []\n for key, val in self.file_dict.items():\n # For input files we only want files that were marked as input\n if val & FileFlags.input_mask:\n ret_list.append(key)\n return ret_list", "response": "Return a list of the input files needed by this link."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn a list of the output files produced by this link.", "response": "def output_files(self):\n \"\"\"Return a list of the output files produced by this link.\n\n For `Link` sub-classes this will return the union\n of all the output files of each internal `Link`.\n\n That is to say this will include files produced by one\n `Link` in a `Chain` and used as input to another `Link` in the `Chain`\n \"\"\"\n ret_list = []\n for key, val in self.file_dict.items():\n # For output files we only want files that were marked as output\n if val & FileFlags.output_mask:\n ret_list.append(key)\n return ret_list"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef chain_input_files(self):\n ret_list = []\n for key, val in self.file_dict.items():\n # For chain input files we only want files that were not marked as output\n # (I.e., not produced by some other step in the chain)\n if val & FileFlags.in_ch_mask == FileFlags.input_mask:\n ret_list.append(key)\n return ret_list", "response": 
"Return a list of the input files needed by this chain."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn a list of all the output files produced by this link.", "response": "def chain_output_files(self):\n \"\"\"Return a list of the all the output files produced by this link.\n\n For `Link` sub-classes this will return only those files\n that were not marked as internal files or marked for removal.\n \"\"\"\n ret_list = []\n for key, val in self.file_dict.items():\n # For pure input files we only want output files that were not\n # marked as internal or temp\n if val & FileFlags.out_ch_mask == FileFlags.output_mask:\n ret_list.append(key)\n return ret_list"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a list of the input files needed by this link.", "response": "def input_files_to_stage(self):\n \"\"\"Return a list of the input files needed by this link.\n\n For `Link` sub-classes this will return the union\n of all the input files of each internal `Link`.\n\n That is to say this will include files produced by one\n `Link` in a `Chain` and used as input to another `Link` in the `Chain`\n \"\"\"\n ret_list = []\n for key, val in self.file_dict.items():\n # For input files we only want files that were marked as input\n if val & FileFlags.in_stage_mask == FileFlags.in_stage_mask:\n ret_list.append(key)\n return ret_list"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef output_files_to_stage(self):\n ret_list = []\n for key, val in self.file_dict.items():\n # For input files we only want files that were marked as input\n if val & FileFlags.out_stage_mask == FileFlags.out_stage_mask:\n ret_list.append(key)\n return ret_list", "response": "Return a list of the input files needed by this link."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef internal_files(self):\n ret_list 
= []\n for key, val in self.file_dict.items():\n # For internal files we only want files that were marked as\n # internal\n if val & FileFlags.internal_mask:\n ret_list.append(key)\n return ret_list", "response": "Return a list of the intermediate files produced by this link."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning a list of the temporary files produced by this link. This returns all the temporary files produced by this link.", "response": "def temp_files(self):\n \"\"\"Return a list of the temporary files produced by this link.\n\n This returns all files that were explicitly marked for removal.\n \"\"\"\n ret_list = []\n for key, val in self.file_dict.items():\n # For temp files we only want files that were marked for removal\n if val & FileFlags.rm_mask:\n ret_list.append(key)\n return ret_list"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef gzip_files(self):\n ret_list = []\n for key, val in self.file_dict.items():\n # For temp files we only want files that were marked for removal\n if val & FileFlags.gz_mask:\n ret_list.append(key)\n return ret_list", "response": "Return a list of the files compressed by this link."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nprint a summary of the files in this file dict. 
This version explictly counts the union of all input and output files.", "response": "def print_summary(self, stream=sys.stdout, indent=\"\"):\n \"\"\"Print a summary of the files in this file dict.\n\n This version explictly counts the union of all input and output files.\n \"\"\"\n stream.write(\"%sTotal files : %i\\n\" %\n (indent, len(self.file_dict)))\n stream.write(\"%s Input files : %i\\n\" %\n (indent, len(self.input_files)))\n stream.write(\"%s Output files : %i\\n\" %\n (indent, len(self.output_files)))\n stream.write(\"%s Internal files : %i\\n\" %\n (indent, len(self.internal_files)))\n stream.write(\"%s Temp files : %i\\n\" %\n (indent, len(self.temp_files)))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef print_chain_summary(self, stream=sys.stdout, indent=\"\"):\n stream.write(\"%sTotal files : %i\\n\" %\n (indent, len(self.file_dict)))\n stream.write(\"%s Input files : %i\\n\" %\n (indent, len(self.chain_input_files)))\n stream.write(\"%s Output files : %i\\n\" %\n (indent, len(self.chain_output_files)))\n stream.write(\"%s Internal files : %i\\n\" %\n (indent, len(self.internal_files)))\n stream.write(\"%s Temp files : %i\\n\" %\n (indent, len(self.temp_files)))", "response": "Print a summary of the files in this file dict."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef split_local_path(self, local_file):\n abspath = os.path.abspath(local_file)\n if abspath.find(self.workdir) >= 0:\n relpath = abspath.replace(self.workdir, '')[1:]\n basename = os.path.basename(relpath)\n dirname = os.path.dirname(relpath)\n else:\n basename = os.path.basename(local_file)\n dirname = ''\n return (dirname, basename)", "response": "Split the local path into a directory name and a file name."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef construct_scratch_path(self, 
dirname, basename):\n return os.path.join(self.scratchdir, dirname, basename)", "response": "Construct and return a path in the scratch area."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nconstructing and return a path in the scratch area from a local file.", "response": "def get_scratch_path(self, local_file):\n \"\"\"Construct and return a path in the scratch area from a local file.\n \"\"\"\n (local_dirname, local_basename) = self.split_local_path(local_file)\n return self.construct_scratch_path(local_dirname, local_basename)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nbuilds a dictionary mapping local paths to scratch paths.", "response": "def map_files(self, local_files):\n \"\"\"Build a dictionary mapping local paths to scratch paths.\n\n Parameters\n ----------\n\n local_files : list\n List of filenames to be mapped to scratch area\n\n Returns dict\n Mapping local_file : fullpath of scratch file\n \"\"\"\n ret_dict = {}\n for local_file in local_files:\n ret_dict[local_file] = self.get_scratch_path(local_file)\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nmake any directories need in the scratch area", "response": "def make_scratch_dirs(file_mapping, dry_run=True):\n \"\"\"Make any directories need in the scratch area\"\"\"\n scratch_dirs = {}\n for value in file_mapping.values():\n scratch_dirname = os.path.dirname(value)\n scratch_dirs[scratch_dirname] = True\n for scratch_dirname in scratch_dirs:\n if dry_run:\n print(\"mkdir -f %s\" % (scratch_dirname))\n else:\n try:\n os.makedirs(scratch_dirname)\n except OSError:\n pass"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef copy_to_scratch(file_mapping, dry_run=True):\n for key, value in file_mapping.items():\n if not os.path.exists(key):\n continue\n if dry_run:\n print (\"copy %s %s\" % (key, 
value))\n else:\n print (\"copy %s %s\" % (key, value))\n copyfile(key, value)\n\n return file_mapping", "response": "Copy input files to scratch area"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef copy_from_scratch(file_mapping, dry_run=True):\n for key, value in file_mapping.items():\n if dry_run:\n print (\"copy %s %s\" % (value, key))\n else:\n try:\n outdir = os.path.dirname(key)\n os.makedirs(outdir)\n except OSError:\n pass\n print (\"copy %s %s\" % (value, key))\n copyfile(value, key)\n return file_mapping", "response": "Copy files from scratch area to local file system"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nbuilds and return an astropy. table. Table to store the file_dict.", "response": "def make_table(file_dict):\n \"\"\"Build and return an `astropy.table.Table` to store `FileHandle`\"\"\"\n col_key = Column(name='key', dtype=int)\n col_path = Column(name='path', dtype='S256')\n col_creator = Column(name='creator', dtype=int)\n col_timestamp = Column(name='timestamp', dtype=int)\n col_status = Column(name='status', dtype=int)\n col_flags = Column(name='flags', dtype=int)\n columns = [col_key, col_path, col_creator,\n col_timestamp, col_status, col_flags]\n table = Table(data=columns)\n for val in file_dict.values():\n val.append_to_table(table)\n return table"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nbuilding and return a dict of FileHandle from an astropy. table. 
Table", "response": "def make_dict(cls, table):\n \"\"\"Build and return a dict of `FileHandle` from an `astropy.table.Table`\n\n The dictionary is keyed by FileHandle.key, which is a unique integer for each file\n \"\"\"\n ret_dict = {}\n for row in table:\n file_handle = cls.create_from_row(row)\n ret_dict[file_handle.key] = file_handle\n return ret_dict"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create_from_row(cls, table_row):\n kwargs = {}\n for key in table_row.colnames:\n kwargs[key] = table_row[key]\n try:\n return cls(**kwargs)\n except KeyError:\n print(kwargs)", "response": "Build and return a FileHandle from an astropy. table. row. Row"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nchecking on the status of this particular file", "response": "def check_status(self, basepath=None):\n \"\"\"Check on the status of this particular file\"\"\"\n if basepath is None:\n fullpath = self.path\n else:\n fullpath = os.path.join(basepath, self.path)\n\n exists = os.path.exists(fullpath)\n if not exists:\n if self.flags & FileFlags.gz_mask != 0:\n fullpath += '.gz'\n exists = os.path.exists(fullpath)\n if exists:\n if self.status == FileStatus.superseded:\n pass\n else:\n self.status = FileStatus.exists\n else:\n if self.status in [FileStatus.no_file,\n FileStatus.expected,\n FileStatus.missing,\n FileStatus.temp_removed]:\n if self.flags & FileFlags.rmint_mask != 0:\n self.status = FileStatus.temp_removed\n elif self.status == FileStatus.exists:\n self.status = FileStatus.missing\n elif self.status == FileStatus.exists:\n self.status = FileStatus.temp_removed\n return self.status"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nadd this instance to a table.", "response": "def append_to_table(self, table):\n \"\"\"Add this instance as a row on a `astropy.table.Table` \"\"\"\n table.add_row(dict(path=self.path,\n key=self.key,\n 
creator=self.creator,\n timestamp=self.timestamp,\n status=self.status,\n flags=self.flags))"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nupdating the values in an astropy. table. Table for this instances", "response": "def update_table_row(self, table, row_idx):\n \"\"\"Update the values in an `astropy.table.Table` for this instances\"\"\"\n table[row_idx]['path'] = self.path\n table[row_idx]['key'] = self.key\n table[row_idx]['creator'] = self.creator\n table[row_idx]['timestamp'] = self.timestamp\n table[row_idx]['status'] = self.status\n table[row_idx]['flags'] = self.flags"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns the full path to the file.", "response": "def _get_fullpath(self, filepath):\n \"\"\"Return filepath with the base_path prefixed \"\"\"\n if filepath[0] == '/':\n return filepath\n return os.path.join(self._base_path, filepath)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _fill_cache(self):\n for irow in range(len(self._table)):\n file_handle = self._make_file_handle(irow)\n self._cache[file_handle.path] = file_handle", "response": "Fill the cache from the table."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _read_table_file(self, table_file):\n self._table_file = table_file\n if os.path.exists(self._table_file):\n self._table = Table.read(self._table_file)\n else:\n self._table = FileHandle.make_table({})\n self._fill_cache()", "response": "Read an astropy. table. Table to set up the archive"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _make_file_handle(self, row_idx):\n row = self._table[row_idx]\n return FileHandle.create_from_row(row)", "response": "Build and return a FileHandle object from an astropy. table. row. 
Row"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ngets the FileHandle object associated to a particular file", "response": "def get_handle(self, filepath):\n \"\"\"Get the `FileHandle` object associated to a particular file \"\"\"\n localpath = self._get_localpath(filepath)\n return self._cache[localpath]"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef register_file(self, filepath, creator, status=FileStatus.no_file, flags=FileFlags.no_flags):\n # check to see if the file already exists\n try:\n file_handle = self.get_handle(filepath)\n raise KeyError(\"File %s already exists in archive\" % filepath)\n except KeyError:\n pass\n localpath = self._get_localpath(filepath)\n if status == FileStatus.exists:\n # Make sure the file really exists\n fullpath = self._get_fullpath(filepath)\n if not os.path.exists(fullpath):\n print(\"register_file called on called on mising file %s\" % fullpath)\n status = FileStatus.missing\n timestamp = 0\n else:\n timestamp = int(os.stat(fullpath).st_mtime)\n else:\n timestamp = 0\n key = len(self._table) + 1\n file_handle = FileHandle(path=localpath,\n key=key,\n creator=creator,\n timestamp=timestamp,\n status=status,\n flags=flags)\n file_handle.append_to_table(self._table)\n self._cache[localpath] = file_handle\n return file_handle", "response": "Register a file in the archive."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nupdates a file in the archive with the given filepath and creator and status.", "response": "def update_file(self, filepath, creator, status):\n \"\"\"Update a file in the archive\n\n If the file does not exists, this raises a `KeyError`\n\n Parameters\n ----------\n\n filepath : str\n The path to the file\n creatror : int\n A unique key for the job that created this file\n status : `FileStatus`\n Enumeration giving current status of file\n\n Returns `FileHandle`\n 
\"\"\"\n file_handle = self.get_handle(filepath)\n if status in [FileStatus.exists, FileStatus.superseded]:\n # Make sure the file really exists\n fullpath = file_handle.fullpath\n if not os.path.exists(fullpath):\n raise ValueError(\"File %s does not exist\" % fullpath)\n timestamp = int(os.stat(fullpath).st_mtime)\n else:\n timestamp = 0\n file_handle.creator = creator\n file_handle.timestamp = timestamp\n file_handle.status = status\n file_handle.update_table_row(self._table, file_handle.key - 1)\n return file_handle"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef get_file_ids(self, file_list, creator=None,\n status=FileStatus.no_file, file_dict=None):\n \"\"\"Get or create a list of file ids based on file names\n\n Parameters\n ----------\n\n file_list : list\n The paths to the file\n creatror : int\n A unique key for the job that created these files\n status : `FileStatus`\n Enumeration giving current status of files\n file_dict : `FileDict`\n Mask giving flags set on this file\n\n Returns list of integers\n \"\"\"\n ret_list = []\n for fname in file_list:\n if file_dict is None:\n flags = FileFlags.no_flags\n else:\n flags = file_dict.file_dict[fname]\n try:\n fhandle = self.get_handle(fname)\n except KeyError:\n if creator is None:\n creator = -1\n # raise KeyError(\"Can not register a file %s without a creator\"%fname)\n fhandle = self.register_file(fname, creator, status, flags)\n ret_list.append(fhandle.key)\n return ret_list", "response": "Get or create a list of file ids based on file names and status"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nget a list of file paths based on a set of ids", "response": "def get_file_paths(self, id_list):\n \"\"\"Get a list of file paths based of a set of ids\n\n Parameters\n ----------\n\n id_list : list\n List of integer file keys\n\n Returns list of file paths\n \"\"\"\n if id_list is None:\n return []\n try:\n 
path_array = self._table[id_list - 1]['path']\n except IndexError:\n print(\"IndexError \", len(self._table), id_list)\n path_array = []\n return [path for path in path_array]"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nwrite the table to the table file", "response": "def write_table_file(self, table_file=None):\n \"\"\"Write the table to self._table_file\"\"\"\n if self._table is None:\n raise RuntimeError(\"No table to write\")\n if table_file is not None:\n self._table_file = table_file\n if self._table_file is None:\n raise RuntimeError(\"No output file specified for table\")\n write_tables_to_fits(self._table_file, [self._table], clobber=True,\n namelist=['FILE_ARCHIVE'])"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef update_file_status(self):\n nfiles = len(self.cache.keys())\n status_vect = np.zeros((6), int)\n sys.stdout.write(\"Updating status of %i files: \" % nfiles)\n sys.stdout.flush()\n for i, key in enumerate(self.cache.keys()):\n if i % 200 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n fhandle = self.cache[key]\n fhandle.check_status(self._base_path)\n fhandle.update_table_row(self._table, fhandle.key - 1)\n status_vect[fhandle.status] += 1\n\n sys.stdout.write(\"!\\n\")\n sys.stdout.flush()\n sys.stdout.write(\"Summary:\\n\")\n sys.stdout.write(\" no_file: %i\\n\" % status_vect[0])\n sys.stdout.write(\" expected: %i\\n\" % status_vect[1])\n sys.stdout.write(\" exists: %i\\n\" % status_vect[2])\n sys.stdout.write(\" missing: %i\\n\" % status_vect[3])\n sys.stdout.write(\" superseded: %i\\n\" % status_vect[4])\n sys.stdout.write(\" temp_removed: %i\\n\" % status_vect[5])", "response": "Update the status of all the files in the archive"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nmakes the list of input files for a particular energy bin X psf type", "response": "def 
_make_ltcube_file_list(ltsumfile, num_files):\n \"\"\"Make the list of input files for a particular energy bin X psf type \"\"\"\n outbasename = os.path.basename(ltsumfile)\n lt_list_file = ltsumfile.replace('fits', 'lst')\n outfile = open(lt_list_file, 'w')\n for i in range(num_files):\n split_key = \"%06i\" % i\n output_dir = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes', split_key)\n filepath = os.path.join(output_dir, outbasename.replace('.fits', '_%s.fits' % split_key))\n outfile.write(filepath)\n outfile.write(\"\\n\")\n outfile.close()\n return '@' + lt_list_file"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nregister these classes with the LinkFactory class.", "response": "def register_classes():\n \"\"\"Register these classes with the `LinkFactory` \"\"\"\n Gtlink_select.register_class()\n Gtlink_bin.register_class()\n Gtlink_expcube2.register_class()\n Gtlink_scrmaps.register_class()\n Gtlink_mktime.register_class()\n Gtlink_ltcube.register_class()\n Link_FermipyCoadd.register_class()\n Link_FermipyGatherSrcmaps.register_class()\n Link_FermipyVstack.register_class()\n Link_FermipyHealview.register_class()\n Gtexpcube2_SG.register_class()\n Gtltsum_SG.register_class()\n SumRings_SG.register_class()\n Vstack_SG.register_class()\n GatherSrcmaps_SG.register_class()\n Healview_SG.register_class()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nhook to build job configurations from a YAML file.", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n datafile = args['data']\n if datafile is None or datafile == 'None':\n return job_configs\n NAME_FACTORY.update_base_dict(args['data'])\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n\n mktimelist = copy.copy(comp.mktimefilters)\n if not mktimelist:\n mktimelist.append('none')\n evtclasslist_keys = 
copy.copy(comp.evtclasses)\n if not evtclasslist_keys:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n else:\n evtclasslist_vals = copy.copy(evtclasslist_keys)\n\n for mktimekey in mktimelist:\n for evtclassval in evtclasslist_vals:\n fullkey = comp.make_key(\n '%s_%s_{ebin_name}_%s_{evtype_name}' %\n (evtclassval, zcut, mktimekey))\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=mktimekey,\n evclass=evtclassval,\n fullpath=True)\n\n outfile = NAME_FACTORY.bexpcube(**name_keys)\n cmap = NAME_FACTORY.ccube(**name_keys)\n infile = NAME_FACTORY.ltcube(**name_keys)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[fullkey] = dict(cmap=cmap,\n infile=infile,\n outfile=outfile,\n irfs=NAME_FACTORY.irfs(**name_keys),\n hpx_order=min(\n comp.hpx_order, args['hpx_order_max']),\n evtype=comp.evtype,\n logfile=logfile)\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef build_job_configs(self, args):\n job_configs = {}\n\n gmm = make_ring_dicts(library=args['library'], basedir='.')\n\n for galkey in gmm.galkeys():\n ring_dict = gmm.ring_dict(galkey)\n for ring_key, ring_info in ring_dict.items():\n output_file = ring_info.merged_gasmap\n file_string = \"\"\n for fname in ring_info.files:\n file_string += \" %s\" % fname\n logfile = make_nfs_path(output_file.replace('.fits', '.log'))\n job_configs[ring_key] = dict(output=output_file,\n args=file_string,\n logfile=logfile)\n\n return job_configs", "response": "Hook to build job configurations for all the modules in the GEMMAP"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nhook to build job configurations from the given arguments", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = 
Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n ret_dict = make_diffuse_comp_info_dict(components=components,\n library=args['library'],\n basedir=NAME_FACTORY.base_dict['basedir'])\n diffuse_comp_info_dict = ret_dict['comp_info_dict']\n\n for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()):\n diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key]\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}')\n if diffuse_comp_info_value.components is None:\n sub_comp_info = diffuse_comp_info_value\n else:\n sub_comp_info = diffuse_comp_info_value.get_component_info(comp)\n name_keys = dict(zcut=zcut,\n sourcekey=sub_comp_info.sourcekey,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n mktime='none',\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n fullpath=True)\n\n outfile = NAME_FACTORY.srcmaps(**name_keys)\n outfile_tokens = os.path.splitext(outfile)\n infile_regexp = \"%s_*.fits*\" % outfile_tokens[0]\n full_key = \"%s_%s\" % (sub_comp_info.sourcekey, key)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[full_key] = dict(output=outfile,\n args=infile_regexp,\n hdu=sub_comp_info.source_name,\n logfile=logfile)\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nhooks to build job configurations from a yaml file", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n ret_dict = make_catalog_comp_dict(library=args['library'],\n basedir=NAME_FACTORY.base_dict['basedir'])\n catalog_info_dict = ret_dict['catalog_info_dict']\n\n for catalog_name in catalog_info_dict:\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = 
comp.make_key('{ebin_name}_{evtype_name}')\n name_keys = dict(zcut=zcut,\n sourcekey=catalog_name,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime='none',\n fullpath=True)\n\n outfile = NAME_FACTORY.srcmaps(**name_keys)\n outfile_tokens = os.path.splitext(outfile)\n infile_regexp = \"%s_*.fits\" % outfile_tokens[0]\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[key] = dict(output=outfile,\n args=infile_regexp,\n logfile=logfile)\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef ln_norm(x, mu, sigma=1.0):\n return np.log(stats.norm(loc=mu, scale=sigma).pdf(x))", "response": "Natural log of scipy norm function truncated at zero"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nlogging - normal function from scipy", "response": "def lognorm(x, mu, sigma=1.0):\n \"\"\" Log-normal function from scipy \"\"\"\n return stats.lognorm(sigma, scale=mu).pdf(x)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef log10norm(x, mu, sigma=1.0):\n return stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)", "response": "Scale scipy lognorm from natural log to base 10"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef lgauss(x, mu, sigma=1.0, logpdf=False):\n x = np.array(x, ndmin=1)\n\n lmu = np.log10(mu)\n s2 = sigma * sigma\n\n lx = np.zeros(x.shape)\n v = np.zeros(x.shape)\n\n lx[x > 0] = np.log10(x[x > 0])\n\n v = 1. 
/ np.sqrt(2 * s2 * np.pi) * np.exp(-(lx - lmu)**2 / (2 * s2))\n\n if not logpdf:\n v /= (x * np.log(10.))\n\n v[x <= 0] = -np.inf\n\n return v", "response": "Log10 normal distribution...\n\n x : Parameter of interest for scanning the pdf\n mu : Peak of the lognormal distribution (mean of the underlying\n normal distribution is log10(mu)\n sigma : Standard deviation of the underlying normal distribution"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a prior_functor from a dictionary.", "response": "def create_prior_functor(d):\n \"\"\"Build a prior from a dictionary.\n\n Parameters\n ----------\n d : A dictionary, it must contain:\n d['functype'] : a recognized function type\n and all of the required parameters for the \n prior_functor of the desired type\n\n Returns\n ----------\n A sub-class of '~fermipy.stats_utils.prior_functor'\n\n Recognized types are:\n\n 'lognorm' : Scipy lognormal distribution\n 'norm' : Scipy normal distribution\n 'gauss' : Gaussian truncated at zero\n 'lgauss' : Gaussian in log-space\n 'lgauss_like' : Gaussian in log-space, with arguments reversed. 
\n 'lgauss_logpdf' : ???\n \"\"\"\n functype = d.get('functype', 'lgauss_like')\n j_ref = d.get('j_ref', 1.0)\n if functype == 'norm':\n return norm_prior(d['mu'], d['sigma'], j_ref)\n elif functype == 'lognorm':\n return lognorm_prior(d['mu'], d['sigma'], j_ref)\n elif functype == 'gauss':\n return function_prior(functype, d['mu'], d['sigma'], gauss, lngauss, j_ref)\n elif functype == 'lgauss':\n return function_prior(functype, d['mu'], d['sigma'], lgauss, lnlgauss, j_ref)\n elif functype in ['lgauss_like', 'lgauss_lik']:\n def fn(x, y, s): return lgauss(y, x, s)\n\n def lnfn(x, y, s): return lnlgauss(y, x, s)\n return function_prior(functype, d['mu'], d['sigma'], fn, lnfn, j_ref)\n elif functype == 'lgauss_log':\n def fn(x, y, s): return lgauss(x, y, s, logpdf=True)\n\n def lnfn(x, y, s): return lnlgauss(x, y, s, logpdf=True)\n return function_prior(functype, d['mu'], d['sigma'], fn, lnfn, j_ref)\n else:\n raise KeyError(\"Unrecognized prior_functor type %s\" % functype)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef marginalization_bins(self):\n log_mean = np.log10(self.mean())\n # Default is to marginalize over two decades,\n # centered on mean, using 1000 bins\n return np.logspace(-1. + log_mean, 1. + log_mean, 1001)/self._j_ref", "response": "Return the marginalization bins for the current set of items"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef profile_bins(self):\n log_mean = np.log10(self.mean())\n log_half_width = max(5. 
* self.sigma(), 3.)\n # Default is to profile over +-5 sigma,\n # centered on mean, using 100 bins\n return np.logspace(log_mean - log_half_width,\n log_mean + log_half_width, 101)/self._j_ref", "response": "The binning of the species in the log10 distribution"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef normalization(self):\n norm_r = self.normalization_range() \n return quad(self, norm_r[0]*self._j_ref, norm_r[1]*self._j_ref)[0]", "response": "The normalization of the entry set"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef init_return(self, ret_type):\n if self._ret_type == ret_type:\n return\n if ret_type == \"straight\":\n self._interp = self._lnlfn.interp\n if ret_type == \"profile\":\n self._profile_loglike_spline(self._lnlfn.interp.x)\n #self._profile_loglike(self._lnlfn.interp.x)\n self._interp = self._prof_interp\n elif ret_type == \"marginal\":\n self._marginal_loglike(self._lnlfn.interp.x)\n self._interp = self._marg_interp\n elif ret_type == \"posterior\":\n self._posterior(self._lnlfn.interp.x)\n self._interp = self._post_interp\n else:\n raise ValueError(\"Did not recognize return type %s\" % ret_type)\n\n self._ret_type = ret_type", "response": "Initializes the object with the requested return type."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nremove all of the cached values and interpolators", "response": "def clear_cached_values(self):\n \"\"\"Removes all of the cached values and interpolators\n \"\"\"\n self._prof_interp = None\n self._prof_y = None\n self._prof_z = None\n self._marg_interp = None\n self._marg_z = None\n self._post = None\n self._post_interp = None\n self._interp = None\n self._ret_type = None"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nevaluates the 2 - D likelihood in the x / y parameter space.", 
"response": "def like(self, x, y):\n \"\"\"Evaluate the 2-D likelihood in the x/y parameter space.\n\n The dimension of the two input arrays should be the same.\n\n Parameters\n ----------\n x : array_like\n Array of coordinates in the `x` parameter.\n\n y : array_like \n Array of coordinates in the `y` nuisance parameter.\n \"\"\"\n # This is the negative log-likelihood\n z = self._lnlfn.interp(x * y)\n return np.exp(-z) * self._nuis_pdf(y) / self._nuis_norm"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef loglike(self, x, y):\n nuis = self._nuis_pdf(y)\n log_nuis = np.where( nuis > 0., np.log(nuis), -1e2)\n vals = -self._lnlfn.interp(x * y ) + \\\n log_nuis - \\\n self._nuis_log_norm\n return vals", "response": "Evaluate the 2 - D log - likelihood in the x / y parameter space."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nprofiles log - likelihood.", "response": "def profile_loglike(self, x):\n \"\"\"Profile log-likelihood.\n\n Returns ``L_prof(x,y=y_min|z')`` : where y_min is the \n value of y that minimizes \n L for a given x.\n\n This will used the cached '~fermipy.castro.Interpolator' object \n if possible, and construct it if needed.\n \"\"\"\n if self._prof_interp is None:\n # This calculates values and caches the spline\n return self._profile_loglike(x)[1]\n\n x = np.array(x, ndmin=1)\n return self._prof_interp(x)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef marginal_loglike(self, x):\n if self._marg_interp is None:\n # This calculates values and caches the spline\n return self._marginal_loglike(x)\n\n x = np.array(x, ndmin=1)\n return self._marg_interp(x)", "response": "Marginal log - likelihood."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _profile_loglike_spline(self, x):\n z = []\n y = []\n\n yv = self._nuis_pdf.profile_bins()\n nuis_vals 
= self._nuis_pdf.log_value(yv) - self._nuis_log_norm\n for xtmp in x:\n zv = -1. * self._lnlfn.interp(xtmp * yv) + nuis_vals\n sp = splrep(yv, zv, k=2, s=0)\n\n def rf(t): return splev(t, sp, der=1)\n ix = np.argmax(splev(yv, sp))\n imin, imax = max(0, ix - 3), min(len(yv) - 1, ix + 3)\n try:\n y0 = opt.brentq(rf, yv[imin], yv[imax], xtol=1e-10)\n except:\n y0 = yv[ix]\n z0 = self.loglike(xtmp, y0)\n z.append(z0)\n y.append(y0)\n\n self._prof_y = np.array(y)\n self._prof_z = np.array(z)\n self._prof_z = self._prof_z.max() - self._prof_z\n \n self._prof_interp = castro.Interpolator(x, self._prof_z)\n return self._prof_y, self._prof_z", "response": "Internal function to calculate and cache the profile likelihood\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _marginal_loglike(self, x):\n yedge = self._nuis_pdf.marginalization_bins()\n yw = yedge[1:] - yedge[:-1]\n yc = 0.5 * (yedge[1:] + yedge[:-1])\n\n s = self.like(x[:, np.newaxis], yc[np.newaxis, :])\n\n # This does the marginalization integral\n z = 1. 
* np.sum(s * yw, axis=1)\n self._marg_z = np.zeros(z.shape)\n msk = z > 0\n self._marg_z[msk] = -1 * np.log(z[msk])\n\n # Extrapolate to unphysical values\n # FIXME, why is this needed\n dlogzdx = (np.log(z[msk][-1]) - np.log(z[msk][-2])\n ) / (x[msk][-1] - x[msk][-2])\n self._marg_z[~msk] = self._marg_z[msk][-1] + \\\n (self._marg_z[~msk] - self._marg_z[msk][-1]) * dlogzdx\n self._marg_interp = castro.Interpolator(x, self._marg_z)\n return self._marg_z", "response": "Internal function to calculate and cache the marginal likelihood\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _compute_mle(self):\n xmax = self._lnlfn.interp.xmax\n x0 = max(self._lnlfn.mle(), xmax * 1e-5)\n ret = opt.fmin(lambda x: np.where(\n xmax > x > 0, -self(x), np.inf), x0, disp=False)\n mle = float(ret[0])\n return mle", "response": "Compute the maximum likelihood estimator."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef build_from_energy_dict(cls, ebin_name, input_dict):\n psf_types = input_dict.pop('psf_types')\n output_list = []\n for psf_type, val_dict in sorted(psf_types.items()):\n fulldict = input_dict.copy()\n fulldict.update(val_dict)\n fulldict['evtype_name'] = psf_type\n fulldict['ebin_name'] = ebin_name\n component = cls(**fulldict)\n output_list += [component]\n return output_list", "response": "Build a list of components from a dictionary for a single energy range"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nbuilding a list of components from a yaml string.", "response": "def build_from_yamlstr(cls, yamlstr):\n \"\"\" Build a list of components from a yaml string\n \"\"\"\n top_dict = yaml.safe_load(yamlstr)\n coordsys = top_dict.pop('coordsys')\n output_list = []\n for e_key, e_dict in sorted(top_dict.items()):\n if e_key == 'coordsys':\n continue\n e_dict = top_dict[e_key]\n e_dict['coordsys'] = coordsys\n 
output_list += cls.build_from_energy_dict(e_key, e_dict)\n return output_list"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _match_cubes(ccube_clean, ccube_dirty,\n bexpcube_clean, bexpcube_dirty,\n hpx_order):\n \"\"\" Match the HEALPIX scheme and order of all the input cubes\n\n return a dictionary of cubes with the same HEALPIX scheme and order\n \"\"\"\n if hpx_order == ccube_clean.hpx.order:\n ccube_clean_at_order = ccube_clean\n else:\n ccube_clean_at_order = ccube_clean.ud_grade(hpx_order, preserve_counts=True)\n\n if hpx_order == ccube_dirty.hpx.order:\n ccube_dirty_at_order = ccube_dirty\n else:\n ccube_dirty_at_order = ccube_dirty.ud_grade(hpx_order, preserve_counts=True)\n\n if hpx_order == bexpcube_clean.hpx.order:\n bexpcube_clean_at_order = bexpcube_clean\n else:\n bexpcube_clean_at_order = bexpcube_clean.ud_grade(hpx_order, preserve_counts=True)\n\n if hpx_order == bexpcube_dirty.hpx.order:\n bexpcube_dirty_at_order = bexpcube_dirty\n else:\n bexpcube_dirty_at_order = bexpcube_dirty.ud_grade(hpx_order, preserve_counts=True)\n\n if ccube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:\n ccube_dirty_at_order = ccube_dirty_at_order.swap_scheme()\n\n if bexpcube_clean_at_order.hpx.nest != ccube_clean.hpx.nest:\n bexpcube_clean_at_order = bexpcube_clean_at_order.swap_scheme()\n\n if bexpcube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:\n bexpcube_dirty_at_order = bexpcube_dirty_at_order.swap_scheme()\n\n ret_dict = dict(ccube_clean=ccube_clean_at_order,\n ccube_dirty=ccube_dirty_at_order,\n bexpcube_clean=bexpcube_clean_at_order,\n bexpcube_dirty=bexpcube_dirty_at_order)\n return ret_dict", "response": "Match the HEALPIX scheme and order of all the input cubes\nInterrupt and return a dictionary of cubes with the same HEALPIX scheme and order of all the input cubes\nInterrupt and order of all the input cubes\nInterrupt."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the 
documentation for the following Python 3 function\ndef _compute_intensity(ccube, bexpcube):\n bexp_data = np.sqrt(bexpcube.data[0:-1, 0:] * bexpcube.data[1:, 0:])\n intensity_data = ccube.data / bexp_data\n intensity_map = HpxMap(intensity_data, ccube.hpx)\n return intensity_map", "response": "Compute the intensity map from the given Cube and Bexpcube."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _compute_mean(map1, map2):\n data = (map1.data + map2.data) / 2.\n return HpxMap(data, map1.hpx)", "response": "Compute the mean of two maps"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _compute_ratio(top, bot):\n data = np.where(bot.data > 0, top.data / bot.data, 0.)\n return HpxMap(data, top.hpx)", "response": "Compute the ratio of two maps."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _compute_diff(map1, map2):\n data = map1.data - map2.data\n return HpxMap(data, map1.hpx)", "response": "Compute the difference between two maps"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _compute_product(map1, map2):\n data = map1.data * map2.data\n return HpxMap(data, map1.hpx)", "response": "Compute the product of two maps."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncomputing the counts map from the intensity and bexpcube", "response": "def _compute_counts_from_intensity(intensity, bexpcube):\n \"\"\" Make the counts map from the intensity\n \"\"\"\n data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])\n return HpxMap(data, intensity.hpx)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncompute the counts maps from the model and bexpcube", "response": "def _compute_counts_from_model(model, bexpcube):\n \"\"\" Make the counts maps from teh mdoe\n 
\"\"\"\n data = model.data * bexpcube.data\n ebins = model.hpx.ebins\n ratio = ebins[1:] / ebins[0:-1]\n half_log_ratio = np.log(ratio) / 2.\n int_map = ((data[0:-1].T * ebins[0:-1]) + (data[1:].T * ebins[1:])) * half_log_ratio\n return HpxMap(int_map.T, model.hpx)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _make_bright_pixel_mask(intensity_mean, mask_factor=5.0):\n mask = np.zeros((intensity_mean.data.shape), bool)\n nebins = len(intensity_mean.data)\n sum_intensity = intensity_mean.data.sum(0)\n mean_intensity = sum_intensity.mean()\n for i in range(nebins):\n mask[i, 0:] = sum_intensity > (mask_factor * mean_intensity)\n return HpxMap(mask, intensity_mean.hpx)", "response": "Make a mask of all the brightest pixels"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _get_aeff_corrections(intensity_ratio, mask):\n nebins = len(intensity_ratio.data)\n aeff_corrections = np.zeros((nebins))\n for i in range(nebins):\n bright_pixels_intensity = intensity_ratio.data[i][mask.data[i]]\n mean_bright_pixel = bright_pixels_intensity.mean()\n aeff_corrections[i] = 1. 
/ mean_bright_pixel\n\n print(\"Aeff correction: \", aeff_corrections)\n return aeff_corrections", "response": "Compute a correction for the effective area from the brighter pixesl\n "} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _apply_aeff_corrections(intensity_map, aeff_corrections):\n data = aeff_corrections * intensity_map.data.T\n return HpxMap(data.T, intensity_map.hpx)", "response": "Multipy a map by the effective area corrections."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _fill_masked_intensity_resid(intensity_resid, bright_pixel_mask):\n filled_intensity = np.zeros((intensity_resid.data.shape))\n nebins = len(intensity_resid.data)\n for i in range(nebins):\n masked = bright_pixel_mask.data[i]\n unmasked = np.invert(masked)\n mean_intensity = intensity_resid.data[i][unmasked].mean()\n filled_intensity[i] = np.where(masked, mean_intensity, intensity_resid.data[i])\n return HpxMap(filled_intensity, intensity_resid.hpx)", "response": "Fill the pixels used to compute the effective area correction with the mean intensity"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nsmooths a healpix map using a Gaussian", "response": "def _smooth_hpx_map(hpx_map, sigma):\n \"\"\" Smooth a healpix map using a Gaussian\n \"\"\"\n if hpx_map.hpx.ordering == \"NESTED\":\n ring_map = hpx_map.swap_scheme()\n else:\n ring_map = hpx_map\n ring_data = ring_map.data.copy()\n nebins = len(hpx_map.data)\n smoothed_data = np.zeros((hpx_map.data.shape))\n for i in range(nebins):\n smoothed_data[i] = healpy.sphtfunc.smoothing(\n ring_data[i], sigma=np.radians(sigma), verbose=False)\n\n smoothed_data.clip(0., 1e99)\n smoothed_ring_map = HpxMap(smoothed_data, ring_map.hpx)\n if hpx_map.hpx.ordering == \"NESTED\":\n return smoothed_ring_map.swap_scheme()\n return smoothed_ring_map"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 
function that can\nconvert an integral quantity to differential quantity.", "response": "def _intergral_to_differential(hpx_map, gamma=-2.0):\n \"\"\" Convert integral quantity to differential quantity\n\n Here we are assuming the spectrum is a powerlaw with index gamma and we\n are using log-log-quadrature to compute the integral quantities.\n \"\"\"\n nebins = len(hpx_map.data)\n diff_map = np.zeros((nebins + 1, hpx_map.hpx.npix))\n ebins = hpx_map.hpx.ebins\n ratio = ebins[1:] / ebins[0:-1]\n half_log_ratio = np.log(ratio) / 2.\n ratio_gamma = np.power(ratio, gamma)\n #ratio_inv_gamma = np.power(ratio, -1. * gamma)\n\n diff_map[0] = hpx_map.data[0] / ((ebins[0] + ratio_gamma[0] * ebins[1]) * half_log_ratio[0])\n for i in range(nebins):\n diff_map[i + 1] = (hpx_map.data[i] / (ebins[i + 1] *\n half_log_ratio[i])) - (diff_map[i] / ratio[i])\n return HpxMap(diff_map, hpx_map.hpx)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nconverting a differential map to an integral map.", "response": "def _differential_to_integral(hpx_map):\n \"\"\" Convert a differential map to an integral map\n\n Here we are using log-log-quadrature to compute the integral quantities.\n \"\"\"\n ebins = hpx_map.hpx.ebins\n ratio = ebins[1:] / ebins[0:-1]\n half_log_ratio = np.log(ratio) / 2.\n int_map = ((hpx_map.data[0:-1].T * ebins[0:-1]) +\n (hpx_map.data[1:].T * ebins[1:])) * half_log_ratio\n return HpxMap(int_map.T, hpx_map.hpx)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef run_analysis(self, argv):\n args = self._parser.parse_args(argv)\n\n # Read the input maps\n ccube_dirty = HpxMap.create_from_fits(args.ccube_dirty, hdu='SKYMAP')\n bexpcube_dirty = HpxMap.create_from_fits(args.bexpcube_dirty, hdu='HPXEXPOSURES')\n ccube_clean = HpxMap.create_from_fits(args.ccube_clean, hdu='SKYMAP')\n bexpcube_clean = HpxMap.create_from_fits(args.bexpcube_clean, hdu='HPXEXPOSURES')\n\n # Decide 
what HEALPix order to work at\n if args.hpx_order:\n hpx_order = args.hpx_order\n else:\n hpx_order = ccube_dirty.hpx.order\n\n # Cast all the input maps to match ccube_clean\n cube_dict = ResidualCR._match_cubes(ccube_clean, ccube_dirty,\n bexpcube_clean, bexpcube_dirty, hpx_order)\n\n # Intenstiy maps\n intensity_clean = ResidualCR._compute_intensity(cube_dict['ccube_clean'],\n cube_dict['bexpcube_clean'])\n intensity_dirty = ResidualCR._compute_intensity(cube_dict['ccube_dirty'],\n cube_dict['bexpcube_dirty'])\n # Mean & ratio of intensity maps\n intensity_mean = ResidualCR._compute_mean(intensity_dirty,\n intensity_clean)\n intensity_ratio = ResidualCR._compute_ratio(intensity_dirty,\n intensity_clean)\n # Selecting the bright pixels for Aeff correction and to mask when filling output map\n bright_pixel_select = ResidualCR._make_bright_pixel_mask(intensity_mean,\n args.select_factor)\n bright_pixel_mask = ResidualCR._make_bright_pixel_mask(intensity_mean,\n args.mask_factor)\n # Compute thte Aeff corrections using the brightest pixels\n aeff_corrections = ResidualCR._get_aeff_corrections(intensity_ratio,\n bright_pixel_select)\n # Apply the Aeff corrections and get the intensity residual\n corrected_dirty = ResidualCR._apply_aeff_corrections(intensity_dirty,\n aeff_corrections)\n corrected_ratio = ResidualCR._compute_ratio(corrected_dirty,\n intensity_clean)\n intensity_resid = ResidualCR._compute_diff(corrected_dirty,\n intensity_clean)\n # Replace the masked pixels with the map mean to avoid features associates with sources\n filled_resid = ResidualCR._fill_masked_intensity_resid(intensity_resid,\n bright_pixel_mask)\n # Smooth the map\n smooth_resid = ResidualCR._smooth_hpx_map(filled_resid,\n args.sigma)\n # Convert to a differential map\n out_model = ResidualCR._intergral_to_differential(smooth_resid)\n\n # Make the ENERGIES HDU\n out_energies = ccube_dirty.hpx.make_energies_hdu()\n\n # Write the maps\n cubes = dict(SKYMAP=out_model)\n 
fits_utils.write_maps(None, cubes,\n args.outfile, energy_hdu=out_energies)\n\n if args.full_output:\n # Some diagnostics\n check = ResidualCR._differential_to_integral(out_model)\n check_resid = ResidualCR._compute_diff(smooth_resid, check)\n counts_resid =\\\n ResidualCR._compute_counts_from_intensity(intensity_resid,\n cube_dict['bexpcube_dirty'])\n pred_counts\\\n = ResidualCR._compute_counts_from_model(out_model,\n cube_dict['bexpcube_dirty'])\n pred_resid = ResidualCR._compute_diff(pred_counts, counts_resid)\n\n out_ebounds = ccube_dirty.hpx.make_energy_bounds_hdu()\n cubes = dict(INTENSITY_CLEAN=intensity_clean,\n INTENSITY_DIRTY=intensity_dirty,\n INTENSITY_RATIO=intensity_ratio,\n CORRECTED_DIRTY=corrected_dirty,\n CORRECTED_RATIO=corrected_ratio,\n INTENSITY_RESID=intensity_resid,\n PIXEL_SELECT=bright_pixel_select,\n PIXEL_MASK=bright_pixel_mask,\n FILLED_RESID=filled_resid,\n SMOOTH_RESID=smooth_resid,\n CHECK=check,\n CHECK_RESID=check_resid,\n COUNTS_RESID=counts_resid,\n PRED_COUNTS=pred_counts,\n PRED_RESID=pred_resid)\n\n fits_utils.write_maps(None, cubes,\n args.outfile.replace('.fits', '_full.fits'),\n energy_hdu=out_ebounds)", "response": "Run this analysis on the input maps and return the result of the analysis."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nhook to build job configurations from the given arguments", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n NAME_FACTORY_CLEAN.update_base_dict(args['data'])\n NAME_FACTORY_DIRTY.update_base_dict(args['data'])\n\n NAME_FACTORY_CLEAN.base_dict['evclass'] = args['clean']\n NAME_FACTORY_DIRTY.base_dict['evclass'] = args['dirty']\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n key = comp.make_key('{ebin_name}_{evtype_name}')\n name_keys = dict(zcut=zcut,\n 
ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=args['mktimefilter'],\n fullpath=True)\n outfile = NAME_FACTORY.residual_cr(**name_keys)\n if args['hpx_order']:\n hpx_order = min(comp.hpx_order, args['hpx_order'])\n else:\n hpx_order = comp.hpx_order\n job_configs[key] = dict(bexpcube_dirty=NAME_FACTORY_DIRTY.bexpcube(**name_keys),\n ccube_dirty=NAME_FACTORY_DIRTY.ccube(**name_keys),\n bexpcube_clean=NAME_FACTORY_CLEAN.bexpcube(**name_keys),\n ccube_clean=NAME_FACTORY_CLEAN.ccube(**name_keys),\n outfile=outfile,\n hpx_order=hpx_order,\n full_output=args['full_output'],\n logfile=make_nfs_path(outfile.replace('.fits', '.log')))\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n\n config_yaml = args['config']\n config_dict = load_yaml(config_yaml)\n\n data = config_dict.get('data')\n comp = config_dict.get('comp')\n dry_run = args.get('dry_run', False)\n\n self._set_link('prepare', SplitAndMktimeChain,\n comp=comp, data=data,\n ft1file=config_dict['ft1file'],\n ft2file=config_dict['ft2file'],\n hpx_order_ccube=config_dict.get('hpx_order_ccube', 7),\n hpx_order_expcube=config_dict.get('hpx_order_expcube', 7),\n mktime=config_dict.get('mktimefitler', None),\n do_ltsum=config_dict.get('do_ltsum', False),\n scratch=config_dict.get('scratch', None),\n dry_run=dry_run)\n\n self._set_link('residual-cr', ResidualCR_SG,\n comp=comp, data=data,\n mktimefilter=config_dict.get('mktimefitler', None),\n hpx_order=config_dict.get('hpx_order_fitting', 4),\n clean=config_dict.get('clean_class', None),\n dirty=config_dict.get('dirty_class', None), \n select_factor=config_dict.get('select_factor', None),\n 
mask_factor=config_dict.get('mask_factor', None),\n sigma=config_dict.get('sigma', None),\n full_output=config_dict.get('full_output', False),\n dry_run=dry_run)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreduce a 3D counts cube to a 2D counts map", "response": "def sum_over_energy(self):\n \"\"\" Reduce a 3D counts cube to a 2D counts map\n \"\"\"\n # Note that the array is using the opposite convention from WCS\n # so we sum over axis 0 in the array, but drop axis 2 in the WCS object\n return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns the flattened pixel indices from an array multi - dimensional pixel indices.", "response": "def xypix_to_ipix(self, xypix, colwise=False):\n \"\"\"Return the flattened pixel indices from an array multi-dimensional\n pixel indices.\n\n Parameters\n ----------\n xypix : list\n List of pixel indices in the order (LON,LAT,ENERGY).\n\n colwise : bool\n Use column-wise pixel indexing.\n \"\"\"\n return np.ravel_multi_index(xypix, self.npix,\n order='F' if colwise else 'C',\n mode='raise')"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef ipix_to_xypix(self, ipix, colwise=False):\n return np.unravel_index(ipix, self.npix,\n order='F' if colwise else 'C')", "response": "Return array multi - dimensional pixel indices from flattened index."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the transposed pixel index from the pixel xy coordinates ipix", "response": "def ipix_swap_axes(self, ipix, colwise=False):\n \"\"\" Return the transposed pixel index from the pixel xy coordinates\n\n if colwise is True (False) this assumes the original index was\n in column wise scheme\n \"\"\"\n xy = self.ipix_to_xypix(ipix, colwise)\n return self.xypix_to_ipix(xy, not colwise)"} {"SOURCE": "codesearchnet", "instruction": 
"Given the following Python 3 function, write the documentation\ndef get_pixel_skydirs(self):\n\n xpix = np.linspace(0, self.npix[0] - 1., self.npix[0])\n ypix = np.linspace(0, self.npix[1] - 1., self.npix[1])\n xypix = np.meshgrid(xpix, ypix, indexing='ij')\n return SkyCoord.from_pixel(np.ravel(xypix[0]),\n np.ravel(xypix[1]), self.wcs)", "response": "Get a list of sky coordinates for the centers of every pixel."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_pixel_indices(self, lons, lats, ibin=None):\n lons = np.array(lons, ndmin=1)\n lats = np.array(lats, ndmin=1)\n\n if len(lats) != len(lons):\n raise RuntimeError('Map.get_pixel_indices, input lengths '\n 'do not match %i %i' % (len(lons), len(lats)))\n if len(self._npix) == 2:\n pix_x, pix_y = self._wcs.wcs_world2pix(lons, lats, 0)\n pixcrd = [np.floor(pix_x).astype(int), np.floor(pix_y).astype(int)]\n elif len(self._npix) == 3:\n all_lons = np.expand_dims(lons, -1)\n all_lats = np.expand_dims(lats, -1)\n if ibin is None:\n all_bins = (np.expand_dims(\n np.arange(self.npix[2]), -1) * np.ones(lons.shape)).T\n else:\n all_bins = ibin\n\n l = self.wcs.wcs_world2pix(all_lons, all_lats, all_bins, 0)\n pix_x = l[0]\n pix_y = l[1]\n pixcrd = [np.floor(l[0]).astype(int), np.floor(l[1]).astype(int),\n all_bins.astype(int)]\n\n return pixcrd", "response": "Return the indices in the flat array corresponding to a set of coordinates."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_map_values(self, lons, lats, ibin=None):\n pix_idxs = self.get_pixel_indices(lons, lats, ibin)\n idxs = copy.copy(pix_idxs)\n\n m = np.empty_like(idxs[0], dtype=bool)\n m.fill(True)\n for i, p in enumerate(pix_idxs):\n m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])\n idxs[i][~m] = 0\n\n vals = self.counts.T[idxs]\n vals[~m] = np.nan\n return vals", "response": "Return the values corresponding to a 
set of coordinates."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_from_hdu(cls, hdu, ebins):\n hpx = HPX.create_from_hdu(hdu, ebins)\n colnames = hdu.columns.names\n cnames = []\n if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':\n pixs = hdu.data.field('PIX')\n chans = hdu.data.field('CHANNEL')\n keys = chans * hpx.npix + pixs\n vals = hdu.data.field('VALUE')\n nebin = len(ebins)\n data = np.zeros((nebin, hpx.npix))\n data.flat[keys] = vals\n else:\n for c in colnames:\n if c.find(hpx.conv.colstring) == 0:\n cnames.append(c)\n nebin = len(cnames)\n data = np.ndarray((nebin, hpx.npix))\n for i, cname in enumerate(cnames):\n data[i, 0:] = hdu.data.field(cname)\n\n return cls(data, hpx)", "response": "Creates and returns an HpxMap object from a FITS HDU."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create_from_hdulist(cls, hdulist, **kwargs):\n extname = kwargs.get('hdu', hdulist[1].name)\n ebins = fits_utils.find_and_read_ebins(hdulist)\n return cls.create_from_hdu(hdulist[extname], ebins)", "response": "Creates and returns an HpxMap object from a FITS HDUList"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef make_wcs_from_hpx(self, sum_ebins=False, proj='CAR', oversample=2,\n normalize=True):\n \"\"\"Make a WCS object and convert HEALPix data into WCS projection\n\n NOTE: this re-calculates the mapping, if you have already\n calculated the mapping it is much faster to use\n convert_to_cached_wcs() instead\n\n Parameters\n ----------\n sum_ebins : bool\n sum energy bins over energy bins before reprojecting\n\n proj : str\n WCS-projection\n\n oversample : int\n Oversampling factor for WCS map\n\n normalize : bool\n True -> perserve integral by splitting HEALPix values between bins\n\n returns (WCS object, np.ndarray() with reprojected data)\n\n \"\"\"\n 
self._wcs_proj = proj\n self._wcs_oversample = oversample\n self._wcs_2d = self.hpx.make_wcs(2, proj=proj, oversample=oversample)\n self._hpx2wcs = HpxToWcsMapping(self.hpx, self._wcs_2d)\n wcs, wcs_data = self.convert_to_cached_wcs(self.counts, sum_ebins,\n normalize)\n return wcs, wcs_data", "response": "Make a WCS object and convert HEALPix data into WCS projection"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nconvert HEALPix data into a WCS object.", "response": "def convert_to_cached_wcs(self, hpx_in, sum_ebins=False, normalize=True):\n \"\"\" Make a WCS object and convert HEALPix data into WCS projection\n\n Parameters\n ----------\n hpx_in : `~numpy.ndarray`\n HEALPix input data\n sum_ebins : bool\n sum energy bins over energy bins before reprojecting\n normalize : bool\n True -> perserve integral by splitting HEALPix values between bins\n\n returns (WCS object, np.ndarray() with reprojected data)\n \"\"\"\n if self._hpx2wcs is None:\n raise Exception('HpxMap.convert_to_cached_wcs() called '\n 'before make_wcs_from_hpx()')\n\n if len(hpx_in.shape) == 1:\n wcs_data = np.ndarray(self._hpx2wcs.npix)\n loop_ebins = False\n hpx_data = hpx_in\n elif len(hpx_in.shape) == 2:\n if sum_ebins:\n wcs_data = np.ndarray(self._hpx2wcs.npix)\n hpx_data = hpx_in.sum(0)\n loop_ebins = False\n else:\n wcs_data = np.ndarray((self.counts.shape[0],\n self._hpx2wcs.npix[0],\n self._hpx2wcs.npix[1]))\n hpx_data = hpx_in\n loop_ebins = True\n else:\n raise Exception('Wrong dimension for HpxMap %i' %\n len(hpx_in.shape))\n\n if loop_ebins:\n for i in range(hpx_data.shape[0]):\n self._hpx2wcs.fill_wcs_map_from_hpx_data(\n hpx_data[i], wcs_data[i], normalize)\n pass\n wcs_data.reshape((self.counts.shape[0], self._hpx2wcs.npix[\n 0], self._hpx2wcs.npix[1]))\n # replace the WCS with a 3D one\n wcs = self.hpx.make_wcs(3, proj=self._wcs_proj,\n energies=np.log10(self.hpx.ebins),\n oversample=self._wcs_oversample)\n else:\n 
self._hpx2wcs.fill_wcs_map_from_hpx_data(\n hpx_data, wcs_data, normalize)\n wcs_data.reshape(self._hpx2wcs.npix)\n wcs = self._wcs_2d\n\n return wcs, wcs_data"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ngetting a list of sky coordinates for the centers of every pixel.", "response": "def get_pixel_skydirs(self):\n \"\"\"Get a list of sky coordinates for the centers of every pixel. \"\"\"\n sky_coords = self._hpx.get_sky_coords()\n if self.hpx.coordsys == 'GAL':\n return SkyCoord(l=sky_coords.T[0], b=sky_coords.T[1], unit='deg', frame='galactic')\n else:\n return SkyCoord(ra=sky_coords.T[0], dec=sky_coords.T[1], unit='deg', frame='icrs')"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreduce a counts cube to a counts map", "response": "def sum_over_energy(self):\n \"\"\" Reduce a counts cube to a counts map \"\"\"\n # We sum over axis 0 in the array, and drop the energy binning in the\n # hpx object\n return HpxMap(np.sum(self.counts, axis=0), self.hpx.copy_and_drop_energy())"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the values of the pixels in the flat array corresponding to a set of coordinates lons and lats.", "response": "def get_map_values(self, lons, lats, ibin=None):\n \"\"\"Return the indices in the flat array corresponding to a set of coordinates\n\n Parameters\n ----------\n lons : array-like\n 'Longitudes' (RA or GLON)\n\n lats : array-like\n 'Latitidues' (DEC or GLAT)\n\n ibin : int or array-like\n Extract data only for a given energy bin. None -> extract data for all bins\n\n Returns\n ----------\n vals : numpy.ndarray((n))\n Values of pixels in the flattened map, np.nan used to flag\n coords outside of map\n \"\"\"\n theta = np.pi / 2. 
- np.radians(lats)\n phi = np.radians(lons)\n\n pix = hp.ang2pix(self.hpx.nside, theta, phi, nest=self.hpx.nest)\n\n if self.data.ndim == 2:\n return self.data[:, pix] if ibin is None else self.data[ibin, pix]\n else:\n return self.data[pix]"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef interpolate(self, lon, lat, egy=None, interp_log=True):\n\n if self.data.ndim == 1:\n theta = np.pi / 2. - np.radians(lat)\n phi = np.radians(lon)\n return hp.pixelfunc.get_interp_val(self.counts, theta,\n phi, nest=self.hpx.nest)\n else:\n return self._interpolate_cube(lon, lat, egy, interp_log)", "response": "Interpolate the map values."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nperforms interpolation on a healpix cube.", "response": "def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):\n \"\"\"Perform interpolation on a healpix cube. If egy is None\n then interpolation will be performed on the existing energy\n planes.\n\n \"\"\"\n\n shape = np.broadcast(lon, lat, egy).shape\n lon = lon * np.ones(shape)\n lat = lat * np.ones(shape)\n theta = np.pi / 2. 
- np.radians(lat)\n phi = np.radians(lon)\n vals = []\n for i, _ in enumerate(self.hpx.evals):\n v = hp.pixelfunc.get_interp_val(self.counts[i], theta,\n phi, nest=self.hpx.nest)\n vals += [np.expand_dims(np.array(v, ndmin=1), -1)]\n\n vals = np.concatenate(vals, axis=-1)\n\n if egy is None:\n return vals.T\n\n egy = egy * np.ones(shape)\n\n if interp_log:\n xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))\n else:\n xvals = utils.val_to_pix(self.hpx.evals, egy)\n\n vals = vals.reshape((-1, vals.shape[-1]))\n xvals = np.ravel(xvals)\n v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals],\n order=1)\n return v.reshape(shape)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef expanded_counts_map(self):\n if self.hpx._ipix is None:\n return self.counts\n\n output = np.zeros(\n (self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)\n for i in range(self.counts.shape[0]):\n output[i][self.hpx._ipix] = self.counts[i]\n return output", "response": "return the full counts map"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef explicit_counts_map(self, pixels=None):\n # No pixel index, so build one\n if self.hpx._ipix is None:\n if self.data.ndim == 2:\n summed = self.counts.sum(0)\n if pixels is None:\n nz = summed.nonzero()[0]\n else:\n nz = pixels\n data_out = np.vstack(self.data[i].flat[nz]\n for i in range(self.data.shape[0]))\n else:\n if pixels is None:\n nz = self.data.nonzero()[0]\n else:\n nz = pixels\n data_out = self.data[nz]\n return (nz, data_out)\n else:\n if pixels is None:\n return (self.hpx._ipix, self.data)\n # FIXME, can we catch this\n raise RuntimeError(\n 'HPX.explicit_counts_map called with pixels for a map that already has pixels')", "response": "return a counts map with explicit index scheme"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef 
sparse_counts_map(self):\n if self.hpx._ipix is None:\n flatarray = self.data.flattern()\n else:\n flatarray = self.expanded_counts_map()\n nz = flatarray.nonzero()[0]\n data_out = flatarray[nz]\n return (nz, data_out)", "response": "return a counts map with sparse index scheme"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncompute signal and background counts for a point source at the given skydir with spectral parameterization fn.", "response": "def compute_counts(self, skydir, fn, ebins=None):\n \"\"\"Compute signal and background counts for a point source at\n position ``skydir`` with spectral parameterization ``fn``.\n\n Parameters\n ----------\n skydir : `~astropy.coordinates.SkyCoord`\n\n ebins : `~numpy.ndarray`\n\n Returns\n -------\n sig : `~numpy.ndarray`\n Signal counts array. Dimensions are energy, angular\n separation, and event type.\n\n bkg : `~numpy.ndarray`\n Background counts array. Dimensions are energy, angular\n separation, and event type.\n\n \"\"\"\n\n if ebins is None:\n ebins = self.ebins\n ectr = self.ectr\n else:\n ectr = np.exp(utils.edge_to_center(np.log(ebins)))\n\n skydir_cel = skydir.transform_to('icrs')\n skydir_gal = skydir.transform_to('galactic')\n\n sig = []\n bkg = []\n bkg_fit = None\n if self._gdiff_fit is not None:\n bkg_fit = []\n\n\n for psf, exp in zip(self._psf, self._exp):\n\n coords0 = np.meshgrid(*[skydir_cel.ra.deg, ectr], indexing='ij')\n coords1 = np.meshgrid(*[skydir_cel.dec.deg, ectr], indexing='ij')\n\n # expv = exp.interpolate(skydir_cel.icrs.ra.deg,\n # skydir_cel.icrs.dec.deg,\n # ectr)\n\n expv = exp.interpolate(coords0[0], coords1[0], coords0[1])\n\n coords0 = np.meshgrid(*[skydir_gal.l.deg, ectr], indexing='ij')\n coords1 = np.meshgrid(*[skydir_gal.b.deg, ectr], indexing='ij')\n\n bkgv = self._gdiff.interpolate(np.ravel(coords0[0]),\n np.ravel(coords1[0]),\n np.ravel(coords0[1]))\n bkgv = bkgv.reshape(expv.shape)\n \n\n # bkgv = self._gdiff.interpolate(\n # skydir_gal.l.deg, 
skydir_gal.b.deg, ectr)\n\n isov = np.exp(np.interp(np.log(ectr), np.log(self._iso[0]),\n np.log(self._iso[1])))\n bkgv += isov\n\n \n s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn,\n egy_dim=1,\n spatial_model=self.spatial_model,\n spatial_size=self.spatial_size)\n\n sig += [s0]\n bkg += [b0]\n\n if self._iso_fit is not None:\n isov_fit = np.exp(np.interp(np.log(ectr), np.log(self._iso_fit[0]),\n np.log(self._iso_fit[1])))\n else:\n isov_fit = isov\n\n if self._gdiff_fit is not None:\n bkgv_fit = self._gdiff_fit.interpolate(np.ravel(coords0[0]),\n np.ravel(coords1[0]),\n np.ravel(coords0[1]))\n bkgv_fit = bkgv_fit.reshape(expv.shape)\n bkgv_fit += isov_fit\n s0, b0 = irfs.compute_ps_counts(ebins, expv, psf,\n bkgv_fit, fn, egy_dim=1,\n spatial_model=self.spatial_model,\n spatial_size=self.spatial_size)\n bkg_fit += [b0]\n\n sig = np.concatenate([np.expand_dims(t, -1) for t in sig])\n bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])\n if self._gdiff_fit is not None:\n bkg_fit = np.concatenate([np.expand_dims(t, -1) for t in bkg_fit])\n\n return sig, bkg, bkg_fit"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef diff_flux_threshold(self, skydir, fn, ts_thresh, min_counts):\n\n sig, bkg, bkg_fit = self.compute_counts(skydir, fn)\n norms = irfs.compute_norm(sig, bkg, ts_thresh,\n min_counts, sum_axes=[2, 3],\n rebin_axes=[10, 1],\n bkg_fit=bkg_fit)\n\n npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [2, 3]))\n norms = np.squeeze(norms)\n\n flux = norms * fn.flux(self.ebins[:-1], self.ebins[1:])\n eflux = norms * fn.eflux(self.ebins[:-1], self.ebins[1:])\n dnde = norms * fn.dnde(self.ectr)\n e2dnde = self.ectr**2 * dnde\n\n return dict(e_min=self.ebins[:-1], e_max=self.ebins[1:],\n e_ref=self.ectr,\n npred=npred, flux=flux, eflux=eflux,\n dnde=dnde, e2dnde=e2dnde)", "response": "Compute the differential flux threshold for a point source atthe given skydir with spectral 
parameterization fn."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ncomputes the integral flux threshold for a point source at the given skydir with spectral parameterization fn.", "response": "def int_flux_threshold(self, skydir, fn, ts_thresh, min_counts):\n \"\"\"Compute the integral flux threshold for a point source at\n position ``skydir`` with spectral parameterization ``fn``.\n\n \"\"\"\n\n ebins = 10**np.linspace(np.log10(self.ebins[0]),\n np.log10(self.ebins[-1]), 33)\n ectr = np.sqrt(ebins[0] * ebins[-1])\n\n sig, bkg, bkg_fit = self.compute_counts(skydir, fn, ebins)\n\n norms = irfs.compute_norm(sig, bkg, ts_thresh,\n min_counts, sum_axes=[1, 2, 3], bkg_fit=bkg_fit,\n rebin_axes=[4, 10, 1])\n\n npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [1, 2, 3]))\n npred = np.array(npred, ndmin=1)\n flux = np.squeeze(norms) * fn.flux(ebins[0], ebins[-1])\n eflux = np.squeeze(norms) * fn.eflux(ebins[0], ebins[-1])\n dnde = np.squeeze(norms) * fn.dnde(ectr)\n e2dnde = ectr**2 * dnde\n\n o = dict(e_min=self.ebins[0], e_max=self.ebins[-1], e_ref=ectr,\n npred=npred, flux=flux, eflux=eflux,\n dnde=dnde, e2dnde=e2dnde)\n\n sig, bkg, bkg_fit = self.compute_counts(skydir, fn)\n\n npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig,\n [2, 3]))\n flux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *\n fn.flux(self.ebins[:-1], self.ebins[1:]))\n eflux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *\n fn.eflux(self.ebins[:-1], self.ebins[1:]))\n dnde = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))\n [:, None] * fn.dnde(self.ectr))\n e2dnde = ectr**2 * dnde\n\n o['bins'] = dict(npred=npred,\n flux=flux,\n eflux=eflux,\n dnde=dnde,\n e2dnde=e2dnde,\n e_min=self.ebins[:-1], e_max=self.ebins[1:],\n e_ref=self.ectr)\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef coords_to_vec(lon, lat):\n phi = np.radians(lon)\n theta = (np.pi / 2) 
- np.radians(lat)\n sin_t = np.sin(theta)\n cos_t = np.cos(theta)\n\n xVals = sin_t * np.cos(phi)\n yVals = sin_t * np.sin(phi)\n zVals = cos_t\n\n # Stack them into the output array\n out = np.vstack((xVals, yVals, zVals)).swapaxes(0, 1)\n return out", "response": "Converts longitute and latitude coordinates to a unit 3 - vector"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns an estimate of the pixel size from the HEALPix nside coordinate", "response": "def get_pixel_size_from_nside(nside):\n \"\"\" Returns an estimate of the pixel size from the HEALPix nside coordinate\n\n This just uses a lookup table to provide a nice round number for each\n HEALPix order. \n \"\"\"\n order = int(np.log2(nside))\n if order < 0 or order > 13:\n raise ValueError('HEALPix order must be between 0 to 13 %i' % order)\n\n return HPX_ORDER_TO_PIXSIZE[order]"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef hpx_to_axes(h, npix):\n x = h.ebins\n z = np.arange(npix[-1] + 1)\n\n return x, z", "response": "Generates a sequence of bin edge vectors corresponding to the the\n axes of a HPX object."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngenerates an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.", "response": "def hpx_to_coords(h, shape):\n \"\"\" Generate an N x D list of pixel center coordinates where N is\n the number of pixels and D is the dimensionality of the map.\"\"\"\n\n x, z = hpx_to_axes(h, shape)\n\n x = np.sqrt(x[0:-1] * x[1:])\n z = z[:-1] + 0.5\n\n x = np.ravel(np.ones(shape) * x[:, np.newaxis])\n z = np.ravel(np.ones(shape) * z[np.newaxis, :])\n\n return np.vstack((x, z))"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nmake the mapping data needed to from HPX pixelization to WCS - based array.", 
"response": "def make_hpx_to_wcs_mapping_centers(hpx, wcs):\n \"\"\" Make the mapping data needed to from from HPX pixelization to a\n WCS-based array\n\n Parameters\n ----------\n hpx : `~fermipy.hpx_utils.HPX`\n The healpix mapping (an HPX object)\n\n wcs : `~astropy.wcs.WCS`\n The wcs mapping (a pywcs.wcs object)\n\n Returns\n -------\n ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel \n -1 indicates the wcs pixel does not contain the center of a HEALpix pixel\n mult_val : array(nx,ny) of 1.\n npix : tuple(nx,ny) with the shape of the wcs grid\n\n \"\"\"\n npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))\n mult_val = np.ones(npix).T.flatten()\n sky_crds = hpx.get_sky_coords()\n pix_crds = wcs.wcs_world2pix(sky_crds, 0).astype(int)\n ipixs = -1 * np.ones(npix, int).T.flatten()\n pix_index = npix[1] * pix_crds[0:, 0] + pix_crds[0:, 1]\n if hpx._ipix is None:\n for ipix, pix_crd in enumerate(pix_index):\n ipixs[pix_crd] = ipix\n else:\n for pix_crd, ipix in zip(pix_index, hpx._ipix):\n ipixs[pix_crd] = ipix\n ipixs = ipixs.reshape(npix).T.flatten()\n return ipixs, mult_val, npix"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef make_hpx_to_wcs_mapping(hpx, wcs):\n npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))\n pix_crds = np.dstack(np.meshgrid(np.arange(npix[0]),\n np.arange(npix[1]))).swapaxes(0, 1).reshape((npix[0] * npix[1], 2))\n if wcs.wcs.naxis == 2:\n sky_crds = wcs.wcs_pix2world(pix_crds, 0)\n else:\n use_wcs = wcs.dropaxis(2)\n sky_crds = use_wcs.wcs_pix2world(pix_crds, 0)\n \n sky_crds *= np.radians(1.)\n sky_crds[0:, 1] = (np.pi / 2) - sky_crds[0:, 1]\n\n fullmask = np.isnan(sky_crds)\n mask = (fullmask[0:, 0] + fullmask[0:, 1]) == 0\n ipixs = -1 * np.ones(npix, int).T.flatten()\n ipixs[mask] = hp.pixelfunc.ang2pix(hpx.nside, sky_crds[0:, 1][mask],\n sky_crds[0:, 0][mask], hpx.nest)\n\n # Here we are counting the number of HEALPix pixels each WCS 
pixel points to;\n # this could probably be vectorized by filling a histogram.\n d_count = {}\n for ipix in ipixs:\n if ipix in d_count:\n d_count[ipix] += 1\n else:\n d_count[ipix] = 1\n\n # Here we are getting a multiplicative factor that tells use how to split up\n # the counts in each HEALPix pixel (by dividing the corresponding WCS pixels\n # by the number of associated HEALPix pixels).\n # This could also likely be vectorized.\n mult_val = np.ones(ipixs.shape)\n for i, ipix in enumerate(ipixs):\n mult_val[i] /= d_count[ipix]\n\n ipixs = ipixs.reshape(npix).flatten()\n mult_val = mult_val.reshape(npix).flatten()\n return ipixs, mult_val, npix", "response": "Make the mapping data needed to from HPX pixelization to WCS - based array."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nparsing the HPX_REG header keyword into a list of tokens.", "response": "def parse_hpxregion(region):\n \"\"\"Parse the HPX_REG header keyword into a list of tokens.\"\"\"\n\n m = re.match(r'([A-Za-z\\_]*?)\\((.*?)\\)', region)\n if m is None:\n raise Exception('Failed to parse hpx region string.')\n\n if not m.group(1):\n return re.split(',', m.group(2))\n else:\n return [m.group(1)] + re.split(',', m.group(2))"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef upix_to_pix(upix):\n nside = np.power(2, np.floor(np.log2(upix / 4)) / 2).astype(int)\n pix = upix - 4 * np.power(nside, 2)\n return pix, nside", "response": "Get the nside from a unique pixel number."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create_hpx(cls, nside, nest, coordsys='CEL', order=-1, ebins=None,\n region=None, conv=HPX_Conv('FGST_CCUBE'), pixels=None):\n \"\"\"Create a HPX object.\n\n Parameters\n ----------\n nside : int\n HEALPix nside paramter\n\n nest : bool\n True for HEALPix \"NESTED\" indexing scheme, False for \"RING\" scheme.\n\n 
coordsys : str\n \"CEL\" or \"GAL\"\n\n order : int\n nside = 2**order\n\n ebins : `~numpy.ndarray`\n Energy bin edges\n\n region : str\n Allows for partial-sky mappings\n\n conv : `HPX_Conv`\n Object defining the convention for column names and the like\n \n pixels : `np.array` or `None`\n For use with 'EXPLICIT' region string \n \"\"\"\n return cls(nside, nest, coordsys, order, ebins, \n region=region, conv=conv, pixels=pixels)", "response": "Create a HEALPix object with the specified nside nest and coordsys."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nidentify the convention used to write this file", "response": "def identify_HPX_convention(header):\n \"\"\" Identify the convention used to write this file \"\"\"\n # Hopefully the file contains the HPX_CONV keyword specifying\n # the convention used\n try:\n return header['HPX_CONV']\n except KeyError:\n pass\n\n indxschm = header.get('INDXSCHM', None)\n\n # Try based on the EXTNAME keyword\n extname = header.get('EXTNAME', None)\n if extname == 'HPXEXPOSURES':\n return 'FGST_BEXPCUBE'\n elif extname == 'SKYMAP2':\n if 'COORDTYPE' in header.keys():\n return 'GALPROP'\n else:\n return 'GALPROP2'\n\n # Check for the INDXSCHM keyword\n if indxschm == 'SPARSE':\n return 'FGST_SRCMAP_SPARSE'\n\n # Check the name of the first column\n colname = header['TTYPE1']\n if colname == 'PIX':\n colname = header['TTYPE2']\n\n if colname == 'KEY':\n return 'FGST_SRCMAP_SPARSE'\n elif colname == 'ENERGY1':\n return 'FGST_TEMPLATE'\n elif colname == 'COSBINS':\n return 'FGST_LTCUBE'\n elif colname == 'Bin0':\n return 'GALPROP'\n elif colname in ['CHANNEL1', 'Bin 0']:\n if extname == 'SKYMAP':\n return 'FGST_CCUBE'\n else:\n return 'FGST_SRCMAP'\n else:\n raise ValueError(\"Could not identify HEALPix convention\")"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef create_from_header(cls, header, ebins=None, 
pixels=None):\n convname = HPX.identify_HPX_convention(header)\n conv = HPX_FITS_CONVENTIONS[convname]\n\n if conv.convname not in ['GALPROP', 'GALPROP2']:\n if header[\"PIXTYPE\"] != \"HEALPIX\":\n raise Exception(\"PIXTYPE != HEALPIX\")\n if header[\"PIXTYPE\"] != \"HEALPIX\":\n raise Exception(\"PIXTYPE != HEALPIX\")\n if header[\"ORDERING\"] == \"RING\":\n nest = False\n elif header[\"ORDERING\"] == \"NESTED\":\n nest = True\n else:\n raise Exception(\"ORDERING != RING | NESTED\")\n\n try:\n order = header[\"ORDER\"]\n except KeyError:\n order = -1\n\n if order < 0:\n nside = header[\"NSIDE\"]\n else:\n nside = -1\n\n try:\n coordsys = header[conv.coordsys]\n except KeyError:\n coordsys = header['COORDSYS']\n\n try:\n region = header[\"HPX_REG\"]\n except KeyError:\n try:\n region = header[\"HPXREGION\"]\n except KeyError:\n region = None\n\n try:\n if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']:\n use_pixels = pixels\n else:\n use_pixels = None\n except KeyError:\n use_pixels = None\n\n return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=use_pixels)", "response": "Creates an HPX object from a header."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create_from_hdu(cls, hdu, ebins=None):\n convname = HPX.identify_HPX_convention(hdu.header)\n conv = HPX_FITS_CONVENTIONS[convname]\n try:\n pixels = hdu.data[conv.idxstring]\n except KeyError:\n pixels = None\n return cls.create_from_header(hdu.header, ebins, pixels)", "response": "Creates an HPX object from a FITS header."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef make_header(self):\n cards = [fits.Card(\"TELESCOP\", \"GLAST\"),\n fits.Card(\"INSTRUME\", \"LAT\"),\n fits.Card(self._conv.coordsys, self._coordsys),\n fits.Card(\"PIXTYPE\", \"HEALPIX\"),\n fits.Card(\"ORDERING\", self.ordering),\n fits.Card(\"ORDER\", self._order),\n 
fits.Card(\"NSIDE\", self._nside),\n fits.Card(\"FIRSTPIX\", 0),\n fits.Card(\"LASTPIX\", self._maxpix - 1),\n fits.Card(\"HPX_CONV\", self._conv.convname)]\n\n if self._coordsys == \"CEL\":\n cards.append(fits.Card(\"EQUINOX\", 2000.0,\n \"Equinox of RA & DEC specifications\"))\n\n if self._region is not None:\n cards.append(fits.Card(\"HPX_REG\", self._region))\n cards.append(fits.Card(\"INDXSCHM\", \"PARTIAL\"))\n elif self._ipix is not None:\n cards.append(fits.Card(\"INDXSCHM\", \"EXPLICIT\")) \n else:\n if self._conv.convname in ['FGST_SRCMAP_SPARSE']:\n cards.append(fits.Card(\"INDXSCHM\", \"SPARSE\"))\n else:\n cards.append(fits.Card(\"INDXSCHM\", \"IMPLICIT\"))\n\n header = fits.Header(cards)\n return header", "response": "Builds and returns a FITS header for this HEALPix map."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nbuild and returns a FITs HDU with input data", "response": "def make_hdu(self, data, **kwargs):\n \"\"\" Builds and returns a FITs HDU with input data\n\n data : The data begin stored\n\n Keyword arguments\n -------------------\n extname : The HDU extension name \n colbase : The prefix for column names\n \"\"\"\n shape = data.shape\n extname = kwargs.get('extname', self.conv.extname)\n\n if shape[-1] != self._npix:\n raise Exception(\n \"Size of data array does not match number of pixels\")\n cols = []\n\n if self._ipix is not None:\n cols.append(fits.Column(self.conv.idxstring, \"J\", array=self._ipix))\n\n if self.conv.convname == 'FGST_SRCMAP_SPARSE':\n nonzero = data.nonzero()\n nfilled = len(nonzero[0])\n if len(shape) == 1:\n cols.append(fits.Column(\"PIX\", \"J\", array=nonzero[0].astype(int)))\n cols.append(fits.Column(\"VALUE\", \"E\", array=data.flat[nonzero].astype(float).reshape(nfilled)))\n elif len(shape) == 2:\n keys = self._npix * nonzero[0] + nonzero[1]\n cols.append(fits.Column(\"PIX\", \"J\", array=nonzero[1].reshape(nfilled)))\n cols.append(fits.Column(\"CHANNEL\", \"I\", 
array=nonzero[0].reshape(nfilled)))\n cols.append(fits.Column(\"VALUE\", \"E\",\n array=data.flat[keys].astype(float).reshape(nfilled)))\n else:\n raise Exception(\"HPX.write_fits only handles 1D and 2D maps\")\n\n else:\n if len(shape) == 1:\n cols.append(fits.Column(self.conv.colname(\n indx=self.conv.firstcol), \"E\", array=data.astype(float)))\n elif len(shape) == 2:\n for i in range(shape[0]):\n cols.append(fits.Column(self.conv.colname(\n indx=i + self.conv.firstcol), \"E\", array=data[i].astype(float)))\n else:\n raise Exception(\"HPX.write_fits only handles 1D and 2D maps\")\n\n header = self.make_header()\n hdu = fits.BinTableHDU.from_columns(cols, header=header, name=extname)\n\n return hdu"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef make_energy_bounds_hdu(self, extname=\"EBOUNDS\"):\n if self._ebins is None:\n return None\n cols = [fits.Column(\"CHANNEL\", \"I\", array=np.arange(1, len(self._ebins + 1))),\n fits.Column(\"E_MIN\", \"1E\", unit='keV',\n array=1000 * self._ebins[0:-1]),\n fits.Column(\"E_MAX\", \"1E\", unit='keV', array=1000 * self._ebins[1:])]\n hdu = fits.BinTableHDU.from_columns(\n cols, self.make_header(), name=extname)\n return hdu", "response": "Builds and returns a FITs HDU with the energy bin boundries\n ArcGIS."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nbuild and returns a FITs HDU with the energy bin boundries .", "response": "def make_energies_hdu(self, extname=\"ENERGIES\"):\n \"\"\" Builds and returns a FITs HDU with the energy bin boundries\n\n extname : The HDU extension name \n \"\"\"\n if self._evals is None:\n return None\n cols = [fits.Column(\"ENERGY\", \"1E\", unit='MeV',\n array=self._evals)]\n hdu = fits.BinTableHDU.from_columns(\n cols, self.make_header(), name=extname)\n return hdu"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nwrites input data to a FITS 
file.", "response": "def write_fits(self, data, outfile, extname=\"SKYMAP\", clobber=True):\n \"\"\" Write input data to a FITS file\n\n data : The data begin stored\n outfile : The name of the output file\n extname : The HDU extension name \n clobber : True -> overwrite existing files\n \"\"\"\n hdu_prim = fits.PrimaryHDU()\n hdu_hpx = self.make_hdu(data, extname=extname)\n hl = [hdu_prim, hdu_hpx]\n if self.conv.energy_hdu == 'EBOUNDS':\n hdu_energy = self.make_energy_bounds_hdu()\n elif self.conv.energy_hdu == 'ENERGIES':\n hdu_energy = self.make_energies_hdu()\n if hdu_energy is not None:\n hl.append(hdu_energy)\n hdulist = fits.HDUList(hl)\n hdulist.writeto(outfile, overwrite=clobber)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the list of pixels indices for all the pixels in a HEALPix nest.", "response": "def get_index_list(nside, nest, region):\n \"\"\" Returns the list of pixels indices for all the pixels in a region\n\n nside : HEALPix nside parameter\n nest : True for 'NESTED', False = 'RING'\n region : HEALPix region string\n \"\"\"\n\n tokens = parse_hpxregion(region)\n if tokens[0] == 'DISK':\n vec = coords_to_vec(float(tokens[1]), float(tokens[2]))\n ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])),\n inclusive=False, nest=nest)\n elif tokens[0] == 'DISK_INC':\n vec = coords_to_vec(float(tokens[1]), float(tokens[2]))\n ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])),\n inclusive=True, fact=int(tokens[4]),\n nest=nest)\n elif tokens[0] == 'HPX_PIXEL':\n nside_pix = int(tokens[2])\n if tokens[1] == 'NESTED':\n ipix_ring = hp.nest2ring(nside_pix, int(tokens[3]))\n elif tokens[1] == 'RING':\n ipix_ring = int(tokens[3])\n else:\n raise Exception(\n \"Did not recognize ordering scheme %s\" % tokens[1])\n ilist = match_hpx_pixel(nside, nest, nside_pix, ipix_ring)\n else:\n raise Exception(\n \"HPX.get_index_list did not recognize region type %s\" % tokens[0])\n return ilist"} 
{"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_ref_dir(region, coordsys):\n if region is None:\n if coordsys == \"GAL\":\n c = SkyCoord(0., 0., frame=Galactic, unit=\"deg\")\n elif coordsys == \"CEL\":\n c = SkyCoord(0., 0., frame=ICRS, unit=\"deg\")\n return c\n\n tokens = parse_hpxregion(region)\n if tokens[0] in ['DISK', 'DISK_INC']:\n if coordsys == \"GAL\":\n c = SkyCoord(float(tokens[1]), float(\n tokens[2]), frame=Galactic, unit=\"deg\")\n elif coordsys == \"CEL\":\n c = SkyCoord(float(tokens[1]), float(\n tokens[2]), frame=ICRS, unit=\"deg\")\n return c\n elif tokens[0] == 'HPX_PIXEL':\n nside_pix = int(tokens[2])\n ipix_pix = int(tokens[3])\n if tokens[1] == 'NESTED':\n nest_pix = True\n elif tokens[1] == 'RING':\n nest_pix = False\n else:\n raise Exception(\n \"Did not recognize ordering scheme %s\" % tokens[1])\n theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix)\n lat = np.degrees((np.pi / 2) - theta)\n lon = np.degrees(phi)\n if coordsys == \"GAL\":\n c = SkyCoord(lon, lat, frame=Galactic, unit=\"deg\")\n elif coordsys == \"CEL\":\n c = SkyCoord(lon, lat, frame=ICRS, unit=\"deg\")\n return c\n else:\n raise Exception(\n \"HPX.get_ref_dir did not recognize region type %s\" % tokens[0])\n return None", "response": "Finds and returns the reference direction for a given HEALPix region string."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nfind and returns the approximate size of a HEALPix region string.", "response": "def get_region_size(region):\n \"\"\" Finds and returns the approximate size of region (in degrees) \n from a HEALPix region string. \n \"\"\"\n if region is None:\n return 180.\n tokens = parse_hpxregion(region)\n if tokens[0] in ['DISK', 'DISK_INC']:\n return float(tokens[3])\n elif tokens[0] == 'HPX_PIXEL':\n pixel_size = get_pixel_size_from_nside(int(tokens[2]))\n return 2. 
* pixel_size\n else:\n raise Exception(\n \"HPX.get_region_size did not recognize region type %s\" % tokens[0])\n return None"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef make_wcs(self, naxis=2, proj='CAR', energies=None, oversample=2):\n w = WCS(naxis=naxis)\n skydir = self.get_ref_dir(self._region, self.coordsys)\n\n if self.coordsys == 'CEL':\n w.wcs.ctype[0] = 'RA---%s' % (proj)\n w.wcs.ctype[1] = 'DEC--%s' % (proj)\n w.wcs.crval[0] = skydir.ra.deg\n w.wcs.crval[1] = skydir.dec.deg\n elif self.coordsys == 'GAL':\n w.wcs.ctype[0] = 'GLON-%s' % (proj)\n w.wcs.ctype[1] = 'GLAT-%s' % (proj)\n w.wcs.crval[0] = skydir.galactic.l.deg\n w.wcs.crval[1] = skydir.galactic.b.deg\n else:\n raise Exception('Unrecognized coordinate system.')\n\n pixsize = get_pixel_size_from_nside(self.nside)\n roisize = self.get_region_size(self._region)\n allsky = False\n if roisize > 45:\n roisize = 90\n allsky = True\n\n npixels = int(2. * roisize / pixsize) * oversample\n crpix = npixels / 2.\n\n if allsky:\n w.wcs.crpix[0] = 2 * crpix\n npix = (2 * npixels, npixels)\n else:\n w.wcs.crpix[0] = crpix\n npix = (npixels, npixels)\n\n w.wcs.crpix[1] = crpix\n w.wcs.cdelt[0] = -pixsize / oversample\n w.wcs.cdelt[1] = pixsize / oversample\n\n if naxis == 3:\n w.wcs.crpix[2] = 1\n w.wcs.ctype[2] = 'Energy'\n if energies is not None:\n w.wcs.crval[2] = 10 ** energies[0]\n w.wcs.cdelt[2] = 10 ** energies[1] - 10 ** energies[0]\n\n w = WCS(w.to_header())\n wcs_proj = WCSProj(w, npix)\n return wcs_proj", "response": "Make a WCS object for this HPX pixelization."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ngets the sky coordinates of all the pixels in this pixelization", "response": "def get_sky_coords(self):\n \"\"\" Get the sky coordinates of all the pixels in this pixelization \"\"\"\n if self._ipix is None:\n theta, phi = hp.pix2ang(\n self._nside, list(range(self._npix)), 
self._nest)\n else:\n theta, phi = hp.pix2ang(self._nside, self._ipix, self._nest)\n\n lat = np.degrees((np.pi / 2) - theta)\n lon = np.degrees(phi)\n return np.vstack([lon, lat]).T"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the indices in the flat array corresponding to a set of coordinates", "response": "def get_pixel_indices(self, lats, lons):\n \"\"\" \"Return the indices in the flat array corresponding to a set of coordinates \"\"\"\n theta = np.radians(90. - lats)\n phi = np.radians(lons)\n return hp.ang2pix(self.nside, theta, phi, self.nest)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the pixel index of a SkyCoord object.", "response": "def skydir_to_pixel(self, skydir):\n \"\"\"Return the pixel index of a SkyCoord object.\"\"\"\n if self.coordsys in ['CEL', 'EQU']:\n skydir = skydir.transform_to('icrs')\n lon = skydir.ra.deg\n lat = skydir.dec.deg\n else:\n skydir = skydir.transform_to('galactic')\n lon = skydir.l.deg\n lat = skydir.b.deg\n\n return self.get_pixel_indices(lat, lon)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef write_to_fitsfile(self, fitsfile, clobber=True):\n from fermipy.skymap import Map\n hpx_header = self._hpx.make_header()\n index_map = Map(self.ipixs, self.wcs)\n mult_map = Map(self.mult_val, self.wcs)\n prim_hdu = index_map.create_primary_hdu()\n mult_hdu = index_map.create_image_hdu()\n for key in ['COORDSYS', 'ORDERING', 'PIXTYPE',\n 'ORDERING', 'ORDER', 'NSIDE',\n 'FIRSTPIX', 'LASTPIX']:\n prim_hdu.header[key] = hpx_header[key]\n mult_hdu.header[key] = hpx_header[key]\n\n hdulist = fits.HDUList([prim_hdu, mult_hdu])\n hdulist.writeto(fitsfile, overwrite=clobber)", "response": "Write this mapping to a FITS file."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef create_from_fitsfile(cls, fitsfile):\n from fermipy.skymap import Map\n index_map = 
Map.create_from_fits(fitsfile)\n mult_map = Map.create_from_fits(fitsfile, hdu=1)\n ff = fits.open(fitsfile)\n hpx = HPX.create_from_hdu(ff[0])\n mapping_data = dict(ipixs=index_map.counts,\n mult_val=mult_map.counts,\n npix=mult_map.counts.shape)\n return cls(hpx, index_map.wcs, mapping_data)", "response": "Read a FITS file and use it to make a mapping\n "} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nfills the wcs map from the input HEALPix data using the pre - calculated mappings from the input HEALPix data.", "response": "def fill_wcs_map_from_hpx_data(self, hpx_data, wcs_data, normalize=True):\n \"\"\"Fills the wcs map from the hpx data using the pre-calculated\n mappings\n\n hpx_data : the input HEALPix data\n wcs_data : the data array being filled\n normalize : True -> perserve integral by splitting HEALPix values between bins\n\n \"\"\"\n # FIXME, there really ought to be a better way to do this\n \n hpx_naxis = len(hpx_data.shape)\n wcs_naxis = len(wcs_data.shape)\n\n if hpx_naxis + 1 != wcs_naxis:\n raise ValueError(\"HPX.fill_wcs_map_from_hpx_data: HPX naxis should be 1 less that WCS naxis: %i, %i\"%(hpx_naxis, wcs_naxis))\n if hpx_naxis == 2:\n if hpx_data.shape[1] != wcs_data.shape[2]:\n raise ValueError(\"HPX.fill_wcs_map_from_hpx_data: size of energy axes don't match: %i, %i\"%(hpx_naxis[1], wcs_naxis[2]))\n\n lmap_valid = self._lmap[self._valid]\n wcs_layer_shape = wcs_data.shape[0]*wcs_data.shape[1]\n if hpx_naxis == 2:\n for i in range(hpx_data.shape[1]):\n wcs_data_layer = np.zeros(wcs_layer_shape)\n wcs_data_layer[self._valid] = hpx_data[:,i][lmap_valid]\n orig_value = wcs_data_layer.sum()\n if normalize:\n wcs_data_layer *= self._mult_val\n wcs_data[:,:,i].flat = wcs_data_layer\n else:\n wcs_data_flat = np.zeros(wcs_layer_shape)\n wcs_data_flat[self._valid] = hpx_data[lmap_valid]\n if normalize:\n wcs_data_flat *= self._mult_val \n wcs_data.flat = wcs_data_flat"} {"SOURCE": "codesearchnet", 
"instruction": "Write a Python 3 script to\ncreate a WCS map from the HEALPix data", "response": "def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True):\n \"\"\" Creates and fills a wcs map from the hpx data using the pre-calculated\n mappings\n\n hpx_data : the input HEALPix data\n wcs : the WCS object\n normalize : True -> perserve integral by splitting HEALPix values between bins\n \"\"\"\n wcs_data = np.zeros(wcs.npix)\n self.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize)\n return wcs_data"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets the number of energy bins in the SED", "response": "def _get_enum_bins(configfile):\n \"\"\"Get the number of energy bin in the SED\n\n Parameters\n ----------\n\n configfile : str\n Fermipy configuration file.\n\n Returns\n -------\n\n nbins : int\n The number of energy bins\n\n \"\"\"\n config = yaml.safe_load(open(configfile))\n\n emin = config['selection']['emin']\n emax = config['selection']['emax']\n log_emin = np.log10(emin)\n log_emax = np.log10(emax)\n ndec = log_emax - log_emin\n binsperdec = config['binning']['binsperdec']\n nebins = int(np.round(binsperdec * ndec))\n\n return nebins"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nfilling the arrays from the files in filelist collist", "response": "def fill_output_table(filelist, hdu, collist, nbins):\n \"\"\"Fill the arrays from the files in filelist\n\n Parameters\n ----------\n\n filelist : list\n List of the files to get data from.\n\n hdu : str\n Name of the HDU containing the table with the input data.\n\n colllist : list\n List of the column names\n\n nbins : int\n Number of bins in the input data arrays\n\n Returns\n -------\n\n table : astropy.table.Table\n A table with all the requested data extracted.\n\n \"\"\"\n nfiles = len(filelist)\n shape = (nbins, nfiles)\n outdict = {}\n for c in collist:\n outdict[c['name']] = np.ndarray(shape)\n\n 
sys.stdout.write('Working on %i files: ' % nfiles)\n sys.stdout.flush()\n for i, f in enumerate(filelist):\n sys.stdout.write('.')\n sys.stdout.flush()\n tab = Table.read(f, hdu)\n for c in collist:\n cname = c['name']\n outdict[cname][:, i] = tab[cname].data\n sys.stdout.write('!\\n')\n outcols = []\n for c in collist:\n cname = c['name']\n if 'unit' in c:\n col = Column(data=outdict[cname], name=cname,\n dtype=np.float, shape=nfiles, unit=c['unit'])\n else:\n col = Column(data=outdict[cname], name=cname,\n dtype=np.float, shape=nfiles)\n outcols.append(col)\n tab = Table(data=outcols)\n return tab"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncollecting summary statisitics from an array of summary data", "response": "def collect_summary_stats(data):\n \"\"\"Collect summary statisitics from an array\n\n This creates a dictionry of output arrays of summary\n statistics, with the input array dimension reducted by one.\n\n Parameters\n ----------\n\n data : `numpy.ndarray`\n Array with the collected input data\n\n\n Returns\n -------\n\n output : dict\n Dictionary of `np.ndarray` with the summary data.\n These include mean, std, median, and 4 quantiles (0.025, 0.16, 0.86, 0.975).\n\n \"\"\"\n mean = np.mean(data, axis=0)\n std = np.std(data, axis=0)\n median = np.median(data, axis=0)\n q02, q16, q84, q97 = np.percentile(data, [2.5, 16, 84, 97.5], axis=0)\n\n o = dict(mean=mean,\n std=std,\n median=median,\n q02=q02,\n q16=q16,\n q84=q84,\n q97=q97)\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef add_summary_stats_to_table(table_in, table_out, colnames):\n for col in colnames:\n col_in = table_in[col]\n stats = collect_summary_stats(col_in.data)\n for k, v in stats.items():\n out_name = \"%s_%s\" % (col, k)\n col_out = Column(data=np.vstack(\n [v]), name=out_name, dtype=col_in.dtype, shape=v.shape, unit=col_in.unit)\n table_out.add_column(col_out)", "response": "Collect summary 
statisitics from an input table and add them to an output table."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nbuilds a stats summary table for a table that has all the SED results", "response": "def summarize_sed_results(sed_table):\n \"\"\"Build a stats summary table for a table that has all the SED results \"\"\"\n del_cols = ['dnde', 'dnde_err', 'dnde_errp', 'dnde_errn', 'dnde_ul',\n 'e2dnde', 'e2dnde_err', 'e2dnde_errp', 'e2dnde_errn', 'e2dnde_ul',\n 'norm', 'norm_err', 'norm_errp', 'norm_errn', 'norm_ul',\n 'ts']\n stats_cols = ['dnde', 'dnde_ul',\n 'e2dnde', 'e2dnde_ul',\n 'norm', 'norm_ul']\n\n table_out = Table(sed_table[0])\n table_out.remove_columns(del_cols)\n add_summary_stats_to_table(sed_table, table_out, stats_cols)\n return table_out"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(\n args, require_sim_name=True)\n if targets_yaml is None:\n return job_configs\n\n write_full = args['write_full']\n\n targets = load_yaml(targets_yaml)\n\n base_config = dict(config=args['config'],\n nsims=args['nsims'],\n seed=args['seed'])\n\n first = args['seed']\n last = first + args['nsims'] - 1\n\n for target_name, profile_list in targets.items():\n for profile in profile_list:\n full_key = \"%s:%s:%s\" % (target_name, profile, sim)\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n profile=profile,\n fullpath=True)\n sed_file = NAME_FACTORY.sim_sedfile(**name_keys)\n outfile = sed_file.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' % (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = sed_file.replace(\n '_SEED.fits', '_summary_%06i_%06i.fits' % (first, last))\n job_config = base_config.copy()\n 
job_config.update(dict(sed_file=sed_file,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs", "response": "Hook to build job configurations from the given arguments"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nupdate the base dictionary with the values from a YAML file.", "response": "def update_base_dict(self, yamlfile):\n \"\"\"Update the values in baseline dictionary used to resolve names\n \"\"\"\n self.base_dict.update(**yaml.safe_load(open(yamlfile)))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _format_from_dict(self, format_string, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n localpath = format_string.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "Return a formatted file name dictionary components"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef sim_sedfile(self, **kwargs):\n if 'seed' not in kwargs:\n kwargs['seed'] = 'SEED'\n return self._format_from_dict(NameFactory.sim_sedfile_format, **kwargs)", "response": "Return the name for the simulated SED file for a particular targetCOOKIE"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the path for a stamp file for a scatter gather job", "response": "def stamp(self, **kwargs):\n \"\"\"Return the path for a stamp file for a scatter gather job\"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n return NameFactory.stamp_format.format(**kwargs_copy)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ngets the name of the targetfile based on the job arguments", "response": "def resolve_targetfile(self, args, require_sim_name=False): 
# x\n \"\"\"Get the name of the targetfile based on the job arguments\"\"\"\n ttype = args.get('ttype')\n if is_null(ttype):\n sys.stderr.write('Target type must be specified')\n return (None, None)\n\n sim = args.get('sim')\n if is_null(sim):\n if require_sim_name:\n sys.stderr.write('Simulation scenario must be specified')\n return (None, None)\n else:\n sim = None\n\n name_keys = dict(target_type=ttype,\n targetlist='target_list.yaml',\n sim_name=sim,\n fullpath=True)\n if sim is None:\n targetfile = self.targetfile(**name_keys)\n else:\n targetfile = self.sim_targetfile(**name_keys)\n\n targets_override = args.get('targetfile')\n if is_not_null(targets_override):\n targetfile = targets_override\n\n return (targetfile, sim)"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nresolves the name of the specturm file based on the job arguments", "response": "def resolve_randconfig(self, args):\n \"\"\"Get the name of the specturm file based on the job arguments\"\"\"\n ttype = args.get('ttype')\n if is_null(ttype):\n sys.stderr.write('Target type must be specified')\n return None\n name_keys = dict(target_type=ttype,\n fullpath=True)\n randconfig = self.randconfig(**name_keys)\n rand_override = args.get('rand_config')\n if is_not_null(rand_override):\n randconfig = rand_override\n return randconfig"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nnoting that gtmktime will be skipped if no FT2 file is provided.", "response": "def main():\n\n usage = \"usage: %(prog)s [options] \"\n description = \"Run gtselect and gtmktime on one or more FT1 files. 
\"\n \"Note that gtmktime will be skipped if no FT2 file is provided.\"\n parser = argparse.ArgumentParser(usage=usage, description=description)\n\n add_lsf_args(parser)\n parser.add_argument('--zmax', default=100., type=float, help='')\n parser.add_argument('--dcostheta', default=0.025, type=float, help='')\n parser.add_argument('--binsz', default=1.0, type=float, help='')\n parser.add_argument('--outdir', default=None, type=str,\n help='Path to output directory used when merge=False.')\n parser.add_argument('--outfile', default=None, type=str,\n help='Path to output file used when merge=True.')\n parser.add_argument('--scfile', default=None, type=str, help='',\n required=True)\n\n parser.add_argument('--dry_run', default=False, action='store_true')\n parser.add_argument('--overwrite', default=False, action='store_true')\n parser.add_argument('--merge', default=False, action='store_true',\n help='Merge input FT1 files into a single file.')\n\n parser.add_argument('files', nargs='+', default=None,\n help='List of directories in which the analysis will '\n 'be run.')\n\n args = parser.parse_args()\n\n args.outdir = os.path.abspath(args.outdir)\n args.scfile = os.path.abspath(args.scfile)\n mkdir(args.outdir)\n input_files = [[os.path.abspath(x)] for x in args.files]\n output_files = [os.path.join(args.outdir, os.path.basename(x))\n for x in args.files]\n\n if args.batch:\n opts = copy.deepcopy(args.__dict__)\n opts.pop('files')\n opts.pop('batch')\n submit_jobs('python ' + os.path.abspath(__file__.rstrip('cd')),\n input_files, output_files, {k: v for k, v in opts.items()})\n sys.exit(0)\n\n logger = Logger.get(os.path.basename(__file__), None, logging.INFO)\n logger.info('Starting.')\n cwd = os.getcwd()\n user = os.environ['USER']\n tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')\n os.chdir(tmpdir)\n\n logger.info('tmpdir %s', tmpdir)\n logger.info('outdir %s', args.outdir)\n logger.info('outfile %s', args.outfile)\n\n for infiles, outfile in 
zip(input_files, output_files):\n\n logger.info('infiles %s', pprint.pformat(infiles))\n logger.info('outfile %s', outfile)\n\n kw = dict(evfile='list.txt',\n scfile=args.scfile,\n outfile='ltcube.fits',\n binsz=args.binsz,\n dcostheta=args.dcostheta,\n zmax=args.zmax)\n\n create_filelist(infiles, 'list.txt')\n staged_outfile = kw['outfile']\n run_gtapp('gtltcube', logger, kw)\n logger.info('cp %s %s', staged_outfile, outfile)\n shutil.copy(staged_outfile, outfile)\n\n os.chdir(cwd)\n logger.info('Deleting %s', tmpdir)\n shutil.rmtree(tmpdir)\n logger.info('Done.')"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef convert_sed_cols(tab):\n # Update Column names\n for colname in list(tab.columns.keys()):\n\n newname = colname.lower()\n newname = newname.replace('dfde', 'dnde')\n\n if tab.columns[colname].name == newname:\n continue\n\n tab.columns[colname].name = newname\n\n return tab", "response": "Cast SED column names to lowercase."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the derivative a an array of input values", "response": "def derivative(self, x, der=1):\n \"\"\" return the derivative a an array of input values\n\n x : the inputs \n der : the order of derivative \n \"\"\"\n from scipy.interpolate import splev\n return splev(x, self._sp, der=der)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _compute_mle(self):\n min_y = np.min(self._interp.y)\n if self._interp.y[0] == min_y:\n self._mle = self._interp.x[0]\n elif self._interp.y[-1] == min_y:\n self._mle = self._interp.x[-1]\n else:\n argmin_y = np.argmin(self._interp.y)\n ix0 = max(argmin_y - 4, 0)\n ix1 = min(argmin_y + 4, len(self._interp.x) - 1)\n\n while np.sign(self._interp.derivative(self._interp.x[ix0])) == \\\n np.sign(self._interp.derivative(self._interp.x[ix1])):\n ix0 += 1\n\n self._mle = 
scipy.optimize.brentq(self._interp.derivative,\n self._interp.x[ix0],\n self._interp.x[ix1],\n xtol=1e-10 *\n np.median(self._interp.x))", "response": "Compute the maximum likelihood estimate."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nfinds the point at which the log - likelihood changes by a given value with respect to its value at the MLE.", "response": "def getDeltaLogLike(self, dlnl, upper=True):\n \"\"\"Find the point at which the log-likelihood changes by a\n given value with respect to its value at the MLE.\"\"\"\n mle_val = self.mle()\n # A little bit of paranoia to avoid zeros\n if mle_val <= 0.:\n mle_val = self._interp.xmin\n if mle_val <= 0.:\n mle_val = self._interp.x[1]\n log_mle = np.log10(mle_val)\n lnl_max = self.fn_mle()\n\n # This ultra-safe code to find an absolute maximum\n # fmax = self.fn_mle()\n # m = (fmax-self.interp.y > 0.1+dlnl) & (self.interp.x>self._mle)\n\n # if sum(m) == 0:\n # xmax = self.interp.x[-1]*10\n # else:\n # xmax = self.interp.x[m][0]\n\n # Matt has found that it is faster to use an interpolator\n # than an actual root-finder to find the root,\n # probably b/c of python overhead.\n # That would be something like this:\n # rf = lambda x: self._interp(x)+dlnl-lnl_max\n # return opt.brentq(rf,self._mle,self._interp.xmax,\n # xtol=1e-10*np.abs(self._mle))\n if upper:\n x = np.logspace(log_mle, np.log10(self._interp.xmax), 100)\n retVal = np.interp(dlnl, self.interp(x) - lnl_max, x)\n else:\n x = np.linspace(self._interp.xmin, self._mle, 100)\n retVal = np.interp(dlnl, self.interp(x)[::-1] - lnl_max, x[::-1])\n\n return retVal"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nevaluating the limits corresponding to a C. L. of alpha%.", "response": "def getLimit(self, alpha, upper=True):\n \"\"\" Evaluate the limits corresponding to a C.L. 
of (1-alpha)%.\n\n Parameters\n ----------\n alpha : limit confidence level.\n upper : upper or lower limits.\n \"\"\"\n dlnl = onesided_cl_to_dlnl(1.0 - alpha)\n return self.getDeltaLogLike(dlnl, upper=upper)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nevaluating the interval corresponding to a C. L of alpha%.", "response": "def getInterval(self, alpha):\n \"\"\" Evaluate the interval corresponding to a C.L. of (1-alpha)%.\n\n Parameters\n ----------\n alpha : limit confidence level.\n \"\"\"\n dlnl = twosided_cl_to_dlnl(1.0 - alpha)\n lo_lim = self.getDeltaLogLike(dlnl, upper=False)\n hi_lim = self.getDeltaLogLike(dlnl, upper=True)\n return (lo_lim, hi_lim)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncreates a new object from a table.", "response": "def create_from_table(cls, tab_e):\n \"\"\"\n Parameters\n ----------\n tab_e : `~astropy.table.Table`\n EBOUNDS table. \n \"\"\"\n\n convert_sed_cols(tab_e)\n try:\n emin = np.array(tab_e['e_min'].to(u.MeV))\n emax = np.array(tab_e['e_max'].to(u.MeV))\n except:\n emin = np.array(tab_e['e_min'])\n emax = np.array(tab_e['e_max'])\n\n ne = len(emin)\n try:\n ref_dnde = np.array(tab_e['ref_dnde'])\n except:\n ref_dnde = np.ones((ne))\n\n try:\n ref_flux = np.array(tab_e['ref_flux'])\n except:\n ref_flux = np.ones((ne))\n\n try:\n ref_eflux = np.array(tab_e['ref_eflux'])\n except:\n ref_eflux = np.ones((ne))\n\n try:\n ref_npred = np.array(tab_e['ref_npred'])\n except:\n ref_npred = np.ones((ne))\n\n return cls(emin, emax, ref_dnde, ref_flux, ref_eflux, ref_npred)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef build_ebound_table(self):\n cols = [\n Column(name=\"E_MIN\", dtype=float, data=self._emin, unit='MeV'),\n Column(name=\"E_MAX\", dtype=float, data=self._emax, unit='MeV'),\n Column(name=\"E_REF\", dtype=float, data=self._eref, unit='MeV'),\n 
Column(name=\"REF_DNDE\", dtype=float, data=self._ref_dnde,\n unit='ph / (MeV cm2 s)'),\n Column(name=\"REF_FLUX\", dtype=float, data=self._ref_flux,\n unit='ph / (cm2 s)'),\n Column(name=\"REF_EFLUX\", dtype=float, data=self._ref_eflux,\n unit='MeV / (cm2 s)'),\n Column(name=\"REF_NPRED\", dtype=float, data=self._ref_npred,\n unit='ph')\n ]\n tab = Table(data=cols)\n return tab", "response": "Build and return an EBOUNDS table with the encapsulated data."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns the derivate of the log - like summed over the energy bins.", "response": "def derivative(self, x, der=1):\n \"\"\"Return the derivate of the log-like summed over the energy\n bins\n\n Parameters\n ----------\n x : `~numpy.ndarray`\n Array of N x M values\n\n der : int\n Order of the derivate\n\n Returns\n -------\n der_val : `~numpy.ndarray`\n Array of negative log-likelihood values.\n \"\"\"\n if len(x.shape) == 1:\n der_val = np.zeros((1))\n else:\n der_val = np.zeros((x.shape[1:]))\n\n for i, xv in enumerate(x):\n der_val += self._loglikes[i].interp.derivative(xv, der=der)\n return der_val"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef mles(self):\n mle_vals = np.ndarray((self._nx))\n for i in range(self._nx):\n mle_vals[i] = self._loglikes[i].mle()\n return mle_vals", "response": "return the maximum likelihood estimates for each of the energy bins\n "} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn test statistic values for each energy bin", "response": "def ts_vals(self):\n \"\"\" returns test statistic values for each energy bin\n \"\"\"\n ts_vals = np.ndarray((self._nx))\n for i in range(self._nx):\n ts_vals[i] = self._loglikes[i].TS()\n\n return ts_vals"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef chi2_vals(self, x):\n\n chi2_vals = 
np.ndarray((self._nx))\n for i in range(self._nx):\n\n mle = self._loglikes[i].mle()\n nll0 = self._loglikes[i].interp(mle)\n nll1 = self._loglikes[i].interp(x[i])\n chi2_vals[i] = 2.0 * np.abs(nll0 - nll1)\n\n return chi2_vals", "response": "Compute the difference in the log - likelihood between the MLE in each energy bin and the normalization predicted by the global best - fit model."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef getLimits(self, alpha, upper=True):\n limit_vals = np.ndarray((self._nx))\n\n for i in range(self._nx):\n limit_vals[i] = self._loglikes[i].getLimit(alpha, upper)\n\n return limit_vals", "response": "Evaluate the limits corresponding to a C. L of alpha%."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nevaluating the two - sided intervals corresponding to a C. L. of alpha%.", "response": "def getIntervals(self, alpha):\n \"\"\" Evaluate the two-sided intervals corresponding to a C.L. 
of\n (1-alpha)%.\n\n Parameters\n ----------\n alpha : float\n limit confidence level.\n\n Returns\n -------\n limit_vals_hi : `~numpy.ndarray`\n An array of lower limit values.\n\n limit_vals_lo : `~numpy.ndarray`\n An array of upper limit values.\n \"\"\"\n limit_vals_lo = np.ndarray((self._nx))\n limit_vals_hi = np.ndarray((self._nx))\n\n for i in range(self._nx):\n lo_lim, hi_lim = self._loglikes[i].getInterval(alpha)\n limit_vals_lo[i] = lo_lim\n limit_vals_hi[i] = hi_lim\n\n return limit_vals_lo, limit_vals_hi"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nfits the normalization given a set of spectral values that define a spectral shape and limits of the derivatvie.", "response": "def fitNormalization(self, specVals, xlims):\n \"\"\"Fit the normalization given a set of spectral values that\n define a spectral shape\n\n This version is faster, and solves for the root of the derivatvie\n\n Parameters\n ----------\n specVals : an array of (nebin values that define a spectral shape\n xlims : fit limits \n\n returns the best-fit normalization value\n \"\"\"\n from scipy.optimize import brentq\n\n def fDeriv(x): return self.norm_derivative(specVals, x)\n try:\n result = brentq(fDeriv, xlims[0], xlims[1])\n except:\n check_underflow = self.__call__(specVals * xlims[0]) < \\\n self.__call__(specVals * xlims[1])\n if check_underflow.any():\n return xlims[0]\n else:\n return xlims[1]\n return result"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef fitNorm_v2(self, specVals):\n from scipy.optimize import fmin\n\n def fToMin(x): return self.__call__(specVals * x)\n result = fmin(fToMin, 0., disp=False, xtol=1e-6)\n return result", "response": "Fit the normalization given a set of spectral values \n that define a spectral shape. This version uses scipy. optimize. 
fmin."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nfits a spectral function to a set of parameters.", "response": "def fit_spectrum(self, specFunc, initPars, freePars=None):\n \"\"\" Fit for the free parameters of a spectral function\n\n Parameters\n ----------\n specFunc : `~fermipy.spectrum.SpectralFunction`\n The Spectral Function\n\n initPars : `~numpy.ndarray`\n The initial values of the parameters\n\n freePars : `~numpy.ndarray` \n Boolean array indicating which parameters should be free in\n the fit.\n\n Returns\n -------\n params : `~numpy.ndarray`\n Best-fit parameters.\n\n spec_vals : `~numpy.ndarray`\n The values of the best-fit spectral model in each energy bin.\n\n ts_spec : float\n The TS of the best-fit spectrum\n\n chi2_vals : `~numpy.ndarray`\n Array of chi-squared values for each energy bin.\n\n chi2_spec : float\n Global chi-squared value for the sum of all energy bins.\n\n pval_spec : float\n p-value of chi-squared for the best-fit spectrum.\n \"\"\"\n if not isinstance(specFunc, SEDFunctor):\n specFunc = self.create_functor(specFunc, initPars,\n scale=specFunc.scale)\n\n if freePars is None:\n freePars = np.empty(len(initPars), dtype=bool)\n freePars.fill(True)\n\n initPars = np.array(initPars)\n freePars = np.array(freePars)\n\n def fToMin(x):\n\n xp = np.array(specFunc.params)\n xp[freePars] = x\n return self.__call__(specFunc(xp))\n\n result = fmin(fToMin, initPars[freePars], disp=False, xtol=1e-6)\n\n out_pars = specFunc.params\n out_pars[freePars] = np.array(result)\n\n spec_vals = specFunc(out_pars)\n spec_npred = np.zeros(len(spec_vals))\n\n if isinstance(specFunc, spectrum.SEDFluxFunctor):\n spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_flux\n elif isinstance(specFunc, spectrum.SEDEFluxFunctor):\n spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_eflux\n\n ts_spec = self.TS_spectrum(spec_vals)\n chi2_vals = self.chi2_vals(spec_vals)\n chi2_spec = 
np.sum(chi2_vals)\n pval_spec = stats.distributions.chi2.sf(chi2_spec, len(spec_vals))\n return dict(params=out_pars, spec_vals=spec_vals,\n spec_npred=spec_npred,\n ts_spec=ts_spec, chi2_spec=chi2_spec,\n chi2_vals=chi2_vals, pval_spec=pval_spec)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef build_scandata_table(self):\n shape = self._norm_vals.shape\n col_norm = Column(name=\"norm\", dtype=float)\n col_normv = Column(name=\"norm_scan\", dtype=float,\n shape=shape)\n col_dll = Column(name=\"dloglike_scan\", dtype=float,\n shape=shape)\n tab = Table(data=[col_norm, col_normv, col_dll])\n tab.add_row({\"norm\": 1.,\n \"norm_scan\": self._norm_vals,\n \"dloglike_scan\": -1 * self._nll_vals})\n return tab", "response": "Build an astropy. table. Table object from these data."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncombines the log - likelihoods from a number of components.", "response": "def stack_nll(shape, components, ylims, weights=None):\n \"\"\"Combine the log-likelihoods from a number of components.\n\n Parameters\n ----------\n shape : tuple\n The shape of the return array\n\n components : `~fermipy.castro.CastroData_Base`\n The components to be stacked\n\n weights : array-like\n\n Returns\n -------\n norm_vals : `numpy.ndarray`\n N X M array of Normalization values\n\n nll_vals : `numpy.ndarray`\n N X M array of log-likelihood values\n\n nll_offsets : `numpy.ndarray`\n N array of maximum log-likelihood values in each bin\n\n \"\"\"\n n_bins = shape[0]\n n_vals = shape[1]\n\n if weights is None:\n weights = np.ones((len(components)))\n\n norm_vals = np.zeros(shape)\n nll_vals = np.zeros(shape)\n nll_offsets = np.zeros((n_bins))\n for i in range(n_bins):\n log_min = np.log10(ylims[0])\n log_max = np.log10(ylims[1])\n norm_vals[i, 1:] = np.logspace(log_min, log_max, n_vals - 1)\n for c, w in zip(components, weights):\n nll_vals[i] += w * 
c[i].interp(norm_vals[i]) - c.nll_offsets[i]\n\n # Reset the offsets\n nll_obj = LnLFn(norm_vals[i], nll_vals[i])\n ll_offset = nll_obj.fn_mle()\n nll_vals[i] -= ll_offset\n nll_offsets[i] = -ll_offset\n pass\n\n return norm_vals, nll_vals, nll_offsets"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef create_from_yamlfile(cls, yamlfile):\n data = load_yaml(yamlfile)\n nebins = len(data)\n emin = np.array([data[i]['emin'] for i in range(nebins)])\n emax = np.array([data[i]['emax'] for i in range(nebins)])\n ref_flux = np.array([data[i]['flux'][1] for i in range(nebins)])\n ref_eflux = np.array([data[i]['eflux'][1] for i in range(nebins)])\n conv = np.array([data[i]['eflux2npred'] for i in range(nebins)])\n ref_npred = conv*ref_eflux\n ones = np.ones(ref_flux.shape)\n ref_spec = ReferenceSpec(emin, emax, ones, ref_flux, ref_eflux, ref_npred)\n norm_data = np.array([data[i]['eflux'] for i in range(nebins)])\n ll_data = np.array([data[i]['logLike'] for i in range(nebins)])\n max_ll = ll_data.max(1)\n nll_data = (max_ll - ll_data.T).T\n return cls(norm_data, nll_data, ref_spec, 'eflux')", "response": "Create a Castro data object from a yaml file contains\n the likelihood data."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncreating a Castro data object from a text file containing differential flux points.", "response": "def create_from_flux_points(cls, txtfile):\n \"\"\"Create a Castro data object from a text file containing a\n sequence of differential flux points.\"\"\"\n\n tab = Table.read(txtfile, format='ascii.ecsv')\n dnde_unit = u.ph / (u.MeV * u.cm ** 2 * u.s)\n loge = np.log10(np.array(tab['e_ref'].to(u.MeV)))\n norm = np.array(tab['norm'].to(dnde_unit))\n norm_errp = np.array(tab['norm_errp'].to(dnde_unit))\n norm_errn = np.array(tab['norm_errn'].to(dnde_unit))\n norm_err = 0.5 * (norm_errp + norm_errn)\n dloge = loge[1:] - loge[:-1]\n dloge = np.insert(dloge, 
0, dloge[0])\n emin = 10 ** (loge - dloge * 0.5)\n emax = 10 ** (loge + dloge * 0.5)\n ectr = 10 ** loge\n deltae = emax - emin\n flux = norm * deltae\n eflux = norm * deltae * ectr\n\n ones = np.ones(flux.shape)\n ref_spec = ReferenceSpec(emin, emax, ones, ones, ones, ones)\n\n spec_data = SpecData(ref_spec, norm, flux, eflux, norm_err)\n\n stephi = np.linspace(0, 1, 11)\n steplo = -np.linspace(0, 1, 11)[1:][::-1]\n\n loscale = 3 * norm_err\n hiscale = 3 * norm_err\n loscale[loscale > norm] = norm[loscale > norm]\n\n norm_vals_hi = norm[:, np.newaxis] + \\\n stephi[np.newaxis, :] * hiscale[:, np.newaxis]\n norm_vals_lo = norm[:, np.newaxis] + \\\n steplo[np.newaxis, :] * loscale[:, np.newaxis]\n\n norm_vals = np.hstack((norm_vals_lo, norm_vals_hi))\n nll_vals = 0.5 * \\\n (norm_vals - norm[:, np.newaxis]) ** 2 / \\\n norm_err[:, np.newaxis] ** 2\n\n norm_vals *= flux[:, np.newaxis] / norm[:, np.newaxis]\n\n return cls(norm_vals, nll_vals, spec_data, 'flux')"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_from_tables(cls, norm_type='eflux',\n tab_s=\"SCANDATA\",\n tab_e=\"EBOUNDS\"):\n \"\"\"Create a CastroData object from two tables\n\n Parameters\n ----------\n norm_type : str\n Type of normalization to use. Valid options are:\n\n * norm : Normalization w.r.t. 
to test source\n * flux : Flux of the test source ( ph cm^-2 s^-1 )\n * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )\n * npred: Number of predicted photons (Not implemented)\n * dnde : Differential flux of the test source ( ph cm^-2 s^-1\n MeV^-1 )\n\n tab_s : str\n table scan data\n\n tab_e : str\n table energy binning and normalization data\n\n Returns\n -------\n castro : `~fermipy.castro.CastroData`\n \"\"\"\n if norm_type in ['flux', 'eflux', 'dnde']:\n norm_vals = np.array(tab_s['norm_scan'] *\n tab_e['ref_%s' % norm_type][:, np.newaxis])\n elif norm_type == \"norm\":\n norm_vals = np.array(tab_s['norm_scan'])\n else:\n raise Exception('Unrecognized normalization type: %s' % norm_type)\n\n nll_vals = -np.array(tab_s['dloglike_scan'])\n\n rs = ReferenceSpec.create_from_table(tab_e)\n return cls(norm_vals, nll_vals, rs, norm_type)", "response": "Create a CastroData object from two tables."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create_from_fits(cls, fitsfile, norm_type='eflux',\n hdu_scan=\"SCANDATA\",\n hdu_energies=\"EBOUNDS\",\n irow=None):\n \"\"\"Create a CastroData object from a tscube FITS file.\n\n Parameters\n ----------\n fitsfile : str\n Name of the fits file\n\n norm_type : str\n Type of normalization to use. Valid options are:\n\n * norm : Normalization w.r.t. 
to test source\n * flux : Flux of the test source ( ph cm^-2 s^-1 )\n * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )\n * npred: Number of predicted photons (Not implemented)\n * dnde : Differential flux of the test source ( ph cm^-2 s^-1\n MeV^-1 )\n\n hdu_scan : str\n Name of the FITS HDU with the scan data\n\n hdu_energies : str \n Name of the FITS HDU with the energy binning and\n normalization data\n\n irow : int or None\n If none, then this assumes that there is a single row in\n the scan data table Otherwise, this specifies which row of\n the table to use\n\n Returns\n -------\n castro : `~fermipy.castro.CastroData`\n\n \"\"\"\n if irow is not None:\n tab_s = Table.read(fitsfile, hdu=hdu_scan)[irow]\n else:\n tab_s = Table.read(fitsfile, hdu=hdu_scan)\n tab_e = Table.read(fitsfile, hdu=hdu_energies)\n\n tab_s = convert_sed_cols(tab_s)\n tab_e = convert_sed_cols(tab_e)\n\n return cls.create_from_tables(norm_type, tab_s, tab_e)", "response": "Create a CastroData object from a tscube FITS file."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncreating a CastroData object from an SED fits file.", "response": "def create_from_sedfile(cls, fitsfile, norm_type='eflux'):\n \"\"\"Create a CastroData object from an SED fits file\n\n Parameters\n ----------\n fitsfile : str \n Name of the fits file\n\n norm_type : str\n Type of normalization to use, options are:\n\n * norm : Normalization w.r.t. 
to test source\n * flux : Flux of the test source ( ph cm^-2 s^-1 )\n * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )\n * npred: Number of predicted photons (Not implemented)\n * dnde : Differential flux of the test source ( ph cm^-2 s^-1\n MeV^-1 )\n\n Returns\n -------\n castro : `~fermipy.castro.CastroData`\n \"\"\"\n tab_s = Table.read(fitsfile, hdu=1)\n tab_s = convert_sed_cols(tab_s)\n\n if norm_type in ['flux', 'eflux', 'dnde']:\n ref_colname = 'ref_%s' % norm_type\n norm_vals = np.array(tab_s['norm_scan'] *\n tab_s[ref_colname][:, np.newaxis])\n elif norm_type == \"norm\":\n norm_vals = np.array(tab_s['norm_scan'])\n else:\n raise ValueError('Unrecognized normalization type: %s' % norm_type)\n\n nll_vals = -np.array(tab_s['dloglike_scan'])\n ref_spec = ReferenceSpec.create_from_table(tab_s)\n spec_data = SpecData(ref_spec, tab_s['norm'], tab_s['norm_err'])\n return cls(norm_vals, nll_vals, spec_data, norm_type)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create_from_stack(cls, shape, components, ylims, weights=None):\n if len(components) == 0:\n return None\n norm_vals, nll_vals, nll_offsets = CastroData_Base.stack_nll(shape,\n components,\n ylims,\n weights)\n return cls(norm_vals, nll_vals,\n components[0].refSpec,\n components[0].norm_type)", "response": "Creates a new instance of the class from a stack of log - likelihoods."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn the log - likelihood for a particular spectrum", "response": "def spectrum_loglike(self, specType, params, scale=1E3):\n \"\"\" return the log-likelihood for a particular spectrum\n\n Parameters\n ----------\n specTypes : str\n The type of spectrum to try\n\n params : array-like\n The spectral parameters\n\n scale : float\n The energy scale or 'pivot' energy\n \"\"\"\n sfn = self.create_functor(specType, scale)[0]\n return self.__call__(sfn(params))"} {"SOURCE": 
"codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create_functor(self, specType, initPars=None, scale=1E3):\n\n emin = self._refSpec.emin\n emax = self._refSpec.emax\n\n fn = SpectralFunction.create_functor(specType,\n self.norm_type,\n emin,\n emax,\n scale=scale)\n\n if initPars is None:\n if specType == 'PowerLaw':\n initPars = np.array([5e-13, -2.0])\n elif specType == 'LogParabola':\n initPars = np.array([5e-13, -2.0, 0.0])\n elif specType == 'PLExpCutoff':\n initPars = np.array([5e-13, -1.0, 1E4])\n\n fn.params = initPars\n return fn", "response": "Create a functor object that computes normalizations in the given spectral model class."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef create_from_fits(cls, fitsfile, norm_type='flux'):\n tsmap = WcsNDMap.read(fitsfile)\n\n tab_e = Table.read(fitsfile, 'EBOUNDS')\n tab_s = Table.read(fitsfile, 'SCANDATA')\n tab_f = Table.read(fitsfile, 'FITDATA')\n\n tab_e = convert_sed_cols(tab_e)\n tab_s = convert_sed_cols(tab_s)\n tab_f = convert_sed_cols(tab_f)\n\n emin = np.array(tab_e['e_min'])\n emax = np.array(tab_e['e_max'])\n try:\n if str(tab_e['e_min'].unit) == 'keV':\n emin /= 1000.\n except:\n pass\n try:\n if str(tab_e['e_max'].unit) == 'keV':\n emax /= 1000.\n except:\n pass\n\n nebins = len(tab_e)\n npred = tab_e['ref_npred']\n\n ndim = len(tsmap.data.shape)\n\n if ndim == 2:\n cube_shape = (tsmap.data.shape[0],\n tsmap.data.shape[1], nebins)\n elif ndim == 1:\n cube_shape = (tsmap.data.shape[0], nebins)\n else:\n raise RuntimeError(\"Counts map has dimension %i\" % (ndim))\n\n refSpec = ReferenceSpec.create_from_table(tab_e)\n nll_vals = -np.array(tab_s[\"dloglike_scan\"])\n norm_vals = np.array(tab_s[\"norm_scan\"])\n\n axis = MapAxis.from_edges(np.concatenate((emin, emax[-1:])),\n interp='log')\n geom_3d = tsmap.geom.to_cube([axis])\n tscube = WcsNDMap(geom_3d,\n np.rollaxis(tab_s[\"ts\"].reshape(cube_shape), 2, 
0))\n\n ncube = WcsNDMap(geom_3d,\n np.rollaxis(tab_s[\"norm\"].reshape(cube_shape), 2, 0))\n nmap = WcsNDMap(tsmap.geom,\n tab_f['fit_norm'].reshape(tsmap.data.shape))\n\n ref_colname = 'ref_%s' % norm_type\n norm_vals *= tab_e[ref_colname][np.newaxis, :, np.newaxis]\n\n return cls(tsmap, nmap, tscube, ncube,\n norm_vals, nll_vals, refSpec,\n norm_type)", "response": "Create a TSCube object from a FITS file created by gttscubeFITS."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef castroData_from_ipix(self, ipix, colwise=False):\n # pix = utils.skydir_to_pix\n if colwise:\n ipix = self._tsmap.ipix_swap_axes(ipix, colwise)\n norm_d = self._norm_vals[ipix]\n nll_d = self._nll_vals[ipix]\n return CastroData(norm_d, nll_d, self._refSpec, self._norm_type)", "response": "Build a CastroData object for a particular pixel"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nbuilding a CastroData object for a particular pixel", "response": "def castroData_from_pix_xy(self, xy, colwise=False):\n \"\"\" Build a CastroData object for a particular pixel \"\"\"\n ipix = self._tsmap.xy_pix_to_ipix(xy, colwise)\n return self.castroData_from_ipix(ipix)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef find_and_refine_peaks(self, threshold, min_separation=1.0,\n use_cumul=False):\n \"\"\"Run a simple peak-finding algorithm, and fit the peaks to\n paraboloids to extract their positions and error ellipses.\n\n Parameters\n ----------\n threshold : float\n Peak threshold in TS.\n\n min_separation : float\n Radius of region size in degrees. 
Sets the minimum allowable\n separation between peaks.\n\n use_cumul : bool\n If true, used the cumulative TS map (i.e., the TS summed\n over the energy bins) instead of the TS Map from the fit\n to and index=2 powerlaw.\n\n Returns\n -------\n peaks : list\n List of dictionaries containing the location and amplitude of\n each peak. Output of `~fermipy.sourcefind.find_peaks`\n\n \"\"\"\n if use_cumul:\n theMap = self._ts_cumul\n else:\n theMap = self._tsmap\n\n peaks = find_peaks(theMap, threshold, min_separation)\n for peak in peaks:\n o, skydir = fit_error_ellipse(theMap, (peak['ix'], peak['iy']),\n dpix=2)\n peak['fit_loc'] = o\n peak['fit_skydir'] = skydir\n if o['fit_success']:\n skydir = peak['fit_skydir']\n else:\n skydir = peak['skydir']\n\n return peaks", "response": "Run a simple peak - finding algorithm and extract the positions and error ellipses of the peaks to the TS map."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef make_lat_lons(cvects):\n lats = np.degrees(np.arcsin(cvects[2]))\n lons = np.degrees(np.arctan2(cvects[0], cvects[1]))\n return np.hstack([lats, lons])", "response": "Convert from directional cosines to latitidue and longitude"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef make_cos_vects(lon_vect, lat_vect):\n lon_rad = np.radians(lon_vect)\n lat_rad = np.radians(lat_vect)\n cvals = np.cos(lat_rad)\n xvals = cvals * np.sin(lon_rad)\n yvals = cvals * np.cos(lon_rad)\n zvals = np.sin(lat_rad)\n cvects = np.vstack([xvals, yvals, zvals])\n return cvects", "response": "Convert from longitude and latitude values to directional cosines."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef find_matches_by_distance(cos_vects, cut_dist):\n dist_rad = np.radians(cut_dist)\n cos_t_cut = np.cos(dist_rad)\n nsrc = cos_vects.shape[1]\n match_dict 
= {}\n for i, v1 in enumerate(cos_vects.T):\n cos_t_vect = (v1 * cos_vects.T).sum(1)\n cos_t_vect[cos_t_vect < -1.0] = -1.0\n cos_t_vect[cos_t_vect > 1.0] = 1.0\n mask = cos_t_vect > cos_t_cut\n acos_t_vect = np.ndarray(nsrc)\n # The 1e-6 is here b/c we use 0.0 for sources that failed the cut elsewhere.\n # We should maybe do this better, but it works for now.\n acos_t_vect[mask] = np.degrees(np.arccos(cos_t_vect[mask])) + 1e-6\n for j in np.where(mask[:i])[0]:\n match_dict[(j, i)] = acos_t_vect[j]\n\n return match_dict", "response": "Find all the pairs of sources within a given distance of each other."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef find_matches_by_sigma(cos_vects, unc_vect, cut_sigma):\n match_dict = {}\n sig_2_vect = unc_vect * unc_vect\n for i, v1 in enumerate(cos_vects.T):\n cos_t_vect = (v1 * cos_vects.T).sum(1)\n cos_t_vect[cos_t_vect < -1.0] = -1.0\n cos_t_vect[cos_t_vect > 1.0] = 1.0\n sig_2_i = sig_2_vect[i]\n acos_t_vect = np.degrees(np.arccos(cos_t_vect))\n total_unc = np.sqrt(sig_2_i + sig_2_vect)\n sigma_vect = acos_t_vect / total_unc\n mask = sigma_vect < cut_sigma\n for j in np.where(mask[:i])[0]:\n match_dict[(j, i)] = sigma_vect[j]\n\n return match_dict", "response": "Find all the pairs of sources within a given distance of each other."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef fill_edge_matrix(nsrcs, match_dict):\n e_matrix = np.zeros((nsrcs, nsrcs))\n for k, v in match_dict.items():\n e_matrix[k[0], k[1]] = v\n return e_matrix", "response": "Create and fill a matrix with the edges between sources."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nmakes a reverse dictionary of the cluster list.", "response": "def make_rev_dict_unique(cdict):\n \"\"\" Make a reverse dictionary\n\n Parameters\n ----------\n in_dict : dict(int:dict(int:True)) \n A dictionary of clusters. 
Each cluster is a source index and\n the dictionary of other sources in the cluster.\n\n Returns\n -------\n rev_dict : dict(int:dict(int:True)) \n A dictionary pointing from source index to the clusters it is\n included in.\n\n \"\"\"\n rev_dict = {}\n for k, v in cdict.items():\n if k in rev_dict:\n rev_dict[k][k] = True\n else:\n rev_dict[k] = {k: True}\n for vv in v.keys():\n if vv in rev_dict:\n rev_dict[vv][k] = True\n else:\n rev_dict[vv] = {k: True}\n return rev_dict"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nfinding clusters from the spanning tree Parameters ---------- span_tree : a sparse nsrcs x nsrcs array Filled with zeros except for the active edges, which are filled with the edge measures (either distances or sigmas cut_value : float Value used to cluster group. All links with measures above this calue will be cut. returns dict(int:[int,...]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster.", "response": "def make_clusters(span_tree, cut_value):\n \"\"\" Find clusters from the spanning tree\n\n Parameters\n ----------\n span_tree : a sparse nsrcs x nsrcs array\n Filled with zeros except for the active edges, which are filled with the\n edge measures (either distances or sigmas\n\n cut_value : float\n Value used to cluster group. All links with measures above this calue will be cut.\n\n returns dict(int:[int,...]) \n A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. 
\n \"\"\"\n iv0, iv1 = span_tree.nonzero()\n\n # This is the dictionary of all the pairings for each source\n match_dict = {}\n\n for i0, i1 in zip(iv0, iv1):\n d = span_tree[i0, i1]\n # Cut on the link distance\n if d > cut_value:\n continue\n\n imin = int(min(i0, i1))\n imax = int(max(i0, i1))\n if imin in match_dict:\n match_dict[imin][imax] = True\n else:\n match_dict[imin] = {imax: True}\n\n working = True\n while working:\n\n working = False\n rev_dict = make_rev_dict_unique(match_dict)\n k_sort = rev_dict.keys()\n k_sort.sort()\n for k in k_sort:\n v = rev_dict[k]\n # Multiple mappings\n if len(v) > 1:\n working = True\n v_sort = v.keys()\n v_sort.sort()\n cluster_idx = v_sort[0]\n for vv in v_sort[1:]:\n try:\n to_merge = match_dict.pop(vv)\n except:\n continue\n try:\n match_dict[cluster_idx].update(to_merge)\n match_dict[cluster_idx][vv] = True\n except:\n continue\n # remove self references\n try:\n match_dict[cluster_idx].pop(cluster_idx)\n except:\n pass\n\n # Convert to a int:list dictionary\n cdict = {}\n for k, v in match_dict.items():\n cdict[k] = v.keys()\n\n # make the reverse dictionary\n rdict = make_reverse_dict(cdict)\n return cdict, rdict"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef select_from_cluster(idx_key, idx_list, measure_vect):\n best_idx = idx_key\n best_measure = measure_vect[idx_key]\n out_list = [idx_key] + idx_list\n for idx, measure in zip(idx_list, measure_vect[idx_list]):\n if measure < best_measure:\n best_idx = idx\n best_measure = measure\n out_list.remove(best_idx)\n return best_idx, out_list", "response": "Select a single source from a cluster and make it the new cluster key\n "} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef find_centroid(cvects, idx_list, weights=None):\n if weights is None:\n weighted = cvects.T[idx_list].sum(0)\n sum_weights = float(len(idx_list))\n else:\n 
weighted = ((cvects * weights).T[idx_list]).sum(0)\n sum_weights = weights.sum(0)\n weighted /= sum_weights\n # make sure it is normalized\n norm = np.sqrt((weighted * weighted).sum())\n weighted /= norm\n return weighted", "response": "Find the centroid for a set of vectors\n "} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns a vector of sources in each cluster.", "response": "def count_sources_in_cluster(n_src, cdict, rev_dict):\n \"\"\" Make a vector of sources in each cluster\n\n Parameters\n ----------\n n_src : number of sources \n\n cdict : dict(int:[int,]) \n A dictionary of clusters. Each cluster is a source index and\n the list of other source in the cluster.\n\n rev_dict : dict(int:int) \n A single valued dictionary pointing from source index to\n cluster key for each source in a cluster. Note that the key\n does not point to itself.\n\n\n Returns\n ----------\n `np.ndarray((n_src),int)' with the number of in the cluster a given source \n belongs to.\n \"\"\"\n ret_val = np.zeros((n_src), int)\n for i in range(n_src):\n try:\n key = rev_dict[i]\n except KeyError:\n key = i\n try:\n n = len(cdict[key])\n except:\n n = 0\n ret_val[i] = n\n return ret_val"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nfinding the centroid for a set of vectors and return the distances to the centroid", "response": "def find_dist_to_centroid(cvects, idx_list, weights=None):\n \"\"\" Find the centroid for a set of vectors\n\n Parameters\n ----------\n cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values\n\n idx_list : [int,...]\n list of the source indices in the cluster\n\n weights : ~numpy.ndarray(nsrc) with the weights to use. 
None for equal weighting\n\n returns (np.ndarray(nsrc)) distances to the centroid (in degrees)\n \"\"\"\n centroid = find_centroid(cvects, idx_list, weights)\n dist_vals = np.degrees(np.arccos((centroid * cvects.T[idx_list]).sum(1)))\n return dist_vals, centroid"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef find_dist_to_centroids(cluster_dict, cvects, weights=None):\n distances = np.zeros((cvects.shape[1]))\n cent_dict = {}\n for k, v in cluster_dict.items():\n l = [k] + v\n distances[l], centroid = find_dist_to_centroid(cvects, l, weights)\n cent_dict[k] = make_lat_lons(centroid)\n return distances, cent_dict", "response": "Find the centroids and the distances to the centroid for all sources in a set of clusters."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef select_from_clusters(cluster_dict, measure_vect):\n out_dict = {}\n for idx_key, idx_list in cluster_dict.items():\n out_idx, out_list = select_from_cluster(\n idx_key, idx_list, measure_vect)\n out_dict[out_idx] = out_list\n return out_dict", "response": "Select a single source from each cluster and make it the new cluster key"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef make_reverse_dict(in_dict, warn=True):\n out_dict = {}\n for k, v in in_dict.items():\n for vv in v:\n if vv in out_dict:\n if warn:\n print(\"Dictionary collision %i\" % vv)\n out_dict[vv] = k\n return out_dict", "response": "Build a dictionary that contains the keys of each other source in a cluster."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nconverts the cluster membership dictionary to an array containing the index of the seed of a cluster.", "response": "def make_cluster_vector(rev_dict, n_src):\n \"\"\" Converts the cluster membership dictionary to an array\n\n Parameters\n ----------\n rev_dict : 
dict(int:int) \n A single valued dictionary pointing from source index to\n cluster key for each source in a cluster. \n\n n_src : int\n Number of source in the array\n\n Returns\n -------\n out_array : `numpy.ndarray' \n An array filled with the index of the seed of a cluster if a source belongs to a cluster, \n and with -1 if it does not.\n \"\"\"\n out_array = -1 * np.ones((n_src), int)\n for k, v in rev_dict.items():\n out_array[k] = v\n # We need this to make sure the see source points at itself\n out_array[v] = v\n return out_array"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef make_cluster_name_vector(cluster_vect, src_names):\n out_array = np.where(cluster_vect >= 0, src_names[cluster_vect], \"\")\n return out_array", "response": "Converts the cluster membership dictionary to an array containing the cluster name."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nconverts the cluster membership array stored in a fits file back to a dictionary.", "response": "def make_dict_from_vector(in_array):\n \"\"\" Converts the cluster membership array stored in a fits file back to a dictionary\n\n Parameters\n ----------\n in_array : `np.ndarray' \n An array filled with the index of the seed of a cluster if a source belongs to a cluster, \n and with -1 if it does not.\n\n Returns\n -------\n returns dict(int:[int,...]) \n Dictionary of clusters keyed by the best source in each cluster\n \"\"\"\n out_dict = {}\n for i, k in enumerate(in_array):\n if k < 0:\n continue\n try:\n out_dict[k].append(i)\n except KeyError:\n out_dict[k] = [i]\n return out_dict"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef filter_and_copy_table(tab, to_remove):\n nsrcs = len(tab)\n mask = np.zeros((nsrcs), '?')\n mask[to_remove] = True\n inv_mask = np.invert(mask)\n out_tab = tab[inv_mask]\n return out_tab", "response": 
"Filter and copy a FITS table."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nperform baseline fitting for a target region of Interest.", "response": "def baseline_roi_fit(gta, make_plots=False, minmax_npred=[1e3, np.inf]):\n \"\"\"Do baseline fitting for a target Region of Interest\n\n Parameters\n ----------\n\n gta : `fermipy.gtaanalysis.GTAnalysis`\n The analysis object\n\n make_plots : bool\n Flag to make standard analysis plots\n\n minmax_npred : tuple or list\n Range of number of predicted coutns for which to free sources in initial fitting.\n\n \"\"\"\n gta.free_sources(False)\n gta.write_roi('base_roi', make_plots=make_plots)\n \n gta.free_sources(True, minmax_npred=[1e3, np.inf])\n gta.optimize()\n gta.free_sources(False) \n gta.print_roi()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nadding a source and get the set of correlated sources", "response": "def add_source_get_correlated(gta, name, src_dict, correl_thresh=0.25, non_null_src=False):\n \"\"\"Add a source and get the set of correlated sources\n\n Parameters\n ----------\n\n gta : `fermipy.gtaanalysis.GTAnalysis`\n The analysis object\n\n name : str\n Name of the source we are adding\n\n src_dict : dict\n Dictionary of the source parameters\n\n correl_thresh : float\n Threshold for considering a source to be correlated\n\n non_null_src : bool\n If True, don't zero the source\n\n Returns\n -------\n\n cdict : dict\n Dictionary with names and correlation factors of correlated sources \n\n test_src_name : bool\n Name of the test source\n\n \"\"\"\n if gta.roi.has_source(name):\n gta.zero_source(name)\n gta.update_source(name)\n test_src_name = \"%s_test\" % name\n else:\n test_src_name = name \n\n gta.add_source(test_src_name, src_dict)\n gta.free_norm(test_src_name)\n gta.free_shape(test_src_name, free=False)\n fit_result = gta.fit(covar=True)\n\n mask = fit_result['is_norm']\n src_names = 
np.array(fit_result['src_names'])[mask]\n idx = (src_names == test_src_name).argmax()\n correl_vals = fit_result['correlation'][idx][mask]\n\n cdict = {}\n for src_name, correl_val in zip(src_names, correl_vals):\n if src_name == name:\n continue\n if np.fabs(correl_val) > 0.25:\n cdict[src_name] = correl_val\n\n if not non_null_src:\n gta.zero_source(test_src_name)\n\n gta.fit(covar=True)\n\n return cdict, test_src_name"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef build_profile_dict(basedir, profile_name):\n profile_path = os.path.join(basedir, \"profile_%s.yaml\" % profile_name)\n profile_config = load_yaml(profile_path)\n src_name = profile_config['name']\n profile_dict = profile_config['source_model']\n return profile_name, src_name, profile_dict", "response": "Build the name and source dictionary for the test source."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ngetting the correct set of arguments for a batch job.", "response": "def get_batch_job_args(job_time=1500):\n \"\"\" Get the correct set of batch jobs arguments.\n\n Parameters\n ----------\n\n job_time : int\n Expected max length of the job, in seconds.\n This is used to select the batch queue and set the\n job_check_sleep parameter that sets how often\n we check for job completion.\n\n Returns\n -------\n job_args : dict\n Dictionary of arguments used to submit a batch job\n\n \"\"\"\n if DEFAULT_JOB_TYPE == 'slac':\n from fermipy.jobs.slac_impl import get_slac_default_args\n return get_slac_default_args(job_time)\n elif DEFAULT_JOB_TYPE == 'native':\n from fermipy.jobs.native_impl import get_native_default_args\n return get_native_default_args()\n return None"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_batch_job_interface(job_time=1500):\n batch_job_args = get_batch_job_args(job_time)\n\n if DEFAULT_JOB_TYPE == 'slac':\n from 
fermipy.jobs.slac_impl import SlacInterface\n return SlacInterface(**batch_job_args)\n elif DEFAULT_JOB_TYPE == 'native':\n from fermipy.jobs.native_impl import NativeInterface\n return NativeInterface(**batch_job_args)\n return None", "response": "Create a batch job interface object."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nregister these classes with the LinkFactory", "response": "def register_classes():\n \"\"\"Register these classes with the `LinkFactory` \"\"\"\n CopyBaseROI.register_class()\n CopyBaseROI_SG.register_class()\n SimulateROI.register_class()\n SimulateROI_SG.register_class()\n RandomDirGen.register_class()\n RandomDirGen_SG.register_class()"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncopy a list of files from orig_dir to dest_dir", "response": "def copy_analysis_files(cls, orig_dir, dest_dir, copyfiles):\n \"\"\" Copy a list of files from orig_dir to dest_dir\"\"\"\n for pattern in copyfiles:\n glob_path = os.path.join(orig_dir, pattern)\n files = glob.glob(glob_path)\n for ff in files:\n f = os.path.basename(ff)\n orig_path = os.path.join(orig_dir, f)\n dest_path = os.path.join(dest_dir, f)\n try:\n copyfile(orig_path, dest_path)\n except IOError:\n sys.stderr.write(\"WARNING: failed to copy %s\\n\" % orig_path)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef copy_target_dir(cls, orig_dir, dest_dir, roi_baseline, extracopy):\n try:\n os.makedirs(dest_dir)\n except OSError:\n pass\n\n copyfiles = ['%s.fits' % roi_baseline,\n '%s.npy' % roi_baseline,\n '%s_*.xml' % roi_baseline] + cls.copyfiles\n if isinstance(extracopy, list):\n copyfiles += extracopy\n\n cls.copy_analysis_files(orig_dir, dest_dir, copyfiles)", "response": "Copy the target directory to dest_dir."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nhooks to build job configurations based on arguments", 
"response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n ttype = args['ttype']\n (sim_targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n targets = load_yaml(sim_targets_yaml)\n\n base_config = dict(ttype=ttype,\n roi_baseline=args['roi_baseline'],\n extracopy = args['extracopy'],\n sim=sim)\n\n for target_name in targets.keys():\n targetdir = NAME_FACTORY.sim_targetdir(target_type=ttype,\n target_name=target_name,\n sim_name=sim)\n logfile = os.path.join(targetdir, 'copy_base_dir.log')\n job_config = base_config.copy()\n job_config.update(dict(target=target_name,\n logfile=logfile))\n job_configs[target_name] = job_config\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nbuild a WCS. Geom object from a fermipy coniguration file", "response": "def _make_wcsgeom_from_config(config):\n \"\"\"Build a `WCS.Geom` object from a fermipy coniguration file\"\"\"\n\n binning = config['binning']\n binsz = binning['binsz']\n coordsys = binning.get('coordsys', 'GAL')\n roiwidth = binning['roiwidth']\n proj = binning.get('proj', 'AIT')\n ra = config['selection']['ra']\n dec = config['selection']['dec']\n npix = int(np.round(roiwidth / binsz))\n skydir = SkyCoord(ra * u.deg, dec * u.deg)\n\n wcsgeom = WcsGeom.create(npix=npix, binsz=binsz,\n proj=proj, coordsys=coordsys,\n skydir=skydir)\n return wcsgeom"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _build_skydir_dict(wcsgeom, rand_config):\n step_x = rand_config['step_x']\n step_y = rand_config['step_y']\n max_x = rand_config['max_x']\n max_y = rand_config['max_y']\n seed = rand_config['seed']\n nsims = rand_config['nsims']\n\n cdelt = wcsgeom.wcs.wcs.cdelt\n pixstep_x = step_x / cdelt[0]\n pixstep_y = -1. * step_y / cdelt[1]\n pixmax_x = max_x / cdelt[0]\n pixmax_y = max_y / cdelt[0]\n\n nstep_x = int(np.ceil(2. 
* pixmax_x / pixstep_x)) + 1\n nstep_y = int(np.ceil(2. * pixmax_y / pixstep_y)) + 1\n\n center = np.array(wcsgeom._center_pix)\n\n grid = np.meshgrid(np.linspace(-1 * pixmax_x, pixmax_x, nstep_x),\n np.linspace(-1 * pixmax_y, pixmax_y, nstep_y))\n grid[0] += center[0]\n grid[1] += center[1]\n\n test_grid = wcsgeom.pix_to_coord(grid)\n glat_vals = test_grid[0].flat\n glon_vals = test_grid[1].flat\n conv_vals = SkyCoord(glat_vals * u.deg, glon_vals *\n u.deg, frame=Galactic).transform_to(ICRS)\n\n ra_vals = conv_vals.ra.deg[seed:nsims]\n dec_vals = conv_vals.dec.deg[seed:nsims]\n\n o_dict = {}\n for i, (ra, dec) in enumerate(zip(ra_vals, dec_vals)):\n key = i + seed\n o_dict[key] = dict(ra=ra, dec=dec)\n return o_dict", "response": "Build a dictionary of random directions for a set of sky locations."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nclones the configuration and srcmaps", "response": "def _clone_config_and_srcmaps(config_path, seed):\n \"\"\"Clone the configuration\"\"\"\n workdir = os.path.dirname(config_path)\n new_config_path = config_path.replace('.yaml', '_%06i.yaml' % seed)\n config = load_yaml(config_path)\n comps = config.get('components', [config])\n for i, comp in enumerate(comps):\n comp_name = \"%02i\" % i\n if 'gtlike' not in comp:\n comp['gtlike'] = {}\n orig_srcmap = os.path.abspath(os.path.join(workdir, 'srcmap_%s.fits' % (comp_name)))\n new_srcmap = os.path.abspath(os.path.join(workdir, 'srcmap_%06i_%s.fits' % (seed, comp_name)))\n comp['gtlike']['srcmap'] = os.path.abspath(os.path.join(workdir, 'srcmap_%06i_%s.fits' % (seed, comp_name)))\n comp['gtlike']['use_external_srcmap'] = True\n copyfile(orig_srcmap, new_srcmap)\n\n write_yaml(config, new_config_path)\n return new_config_path"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nsimulate a realization of a test set.", "response": "def _run_simulation(gta, roi_baseline,\n injected_name, test_sources, 
current_seed, seed, non_null_src):\n \"\"\"Simulate a realization of this analysis\"\"\"\n gta.load_roi('sim_baseline_%06i.npy' % current_seed)\n gta.set_random_seed(seed)\n gta.simulate_roi()\n if injected_name:\n gta.zero_source(injected_name)\n\n gta.optimize()\n gta.find_sources(sqrt_ts_threshold=5.0, search_skydir=gta.roi.skydir,\n search_minmax_radius=[1.0, np.nan])\n gta.optimize()\n gta.free_sources(skydir=gta.roi.skydir, distance=1.0, pars='norm')\n gta.fit(covar=True)\n gta.write_roi('sim_refit_%06i' % current_seed)\n\n for test_source in test_sources:\n test_source_name = test_source['name']\n sedfile = \"sed_%s_%06i.fits\" % (test_source_name, seed)\n correl_dict, test_src_name = add_source_get_correlated(gta, test_source_name,\n test_source['source_model'],\n correl_thresh=0.25,\n non_null_src=non_null_src)\n\n # Write the list of correlated sources\n correl_yaml = os.path.join(gta.workdir,\n \"correl_%s_%06i.yaml\" % (test_source_name, seed))\n write_yaml(correl_dict, correl_yaml)\n\n gta.free_sources(False)\n for src_name in correl_dict.keys():\n gta.free_source(src_name, pars='norm')\n\n gta.sed(test_source_name, prefix=pkey, outfile=sedfile)\n # Set things back to how they were\n gta.delete_source(test_source_name)\n gta.load_xml('sim_refit_%06i' % current_seed)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if targets_yaml is None:\n return job_configs\n\n config_yaml = 'config.yaml'\n config_override = args.get('config')\n if is_not_null(config_override):\n config_yaml = config_override\n\n rand_yaml = NAME_FACTORY.resolve_randconfig(args)\n\n targets = load_yaml(targets_yaml)\n\n base_config = dict(rand_config=rand_yaml)\n\n for target_name in targets.keys():\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n 
fullpath=True)\n simdir = NAME_FACTORY.sim_targetdir(**name_keys)\n config_path = os.path.join(simdir, config_yaml)\n outfile = os.path.join(simdir, 'skydirs.yaml')\n logfile = make_nfs_path(outfile.replace('yaml', 'log'))\n job_config = base_config.copy()\n job_config.update(dict(config=config_path,\n outfile=outfile,\n logfile=logfile))\n job_configs[target_name] = job_config\n\n return job_configs", "response": "Hook to build job configurations based on the input arguments."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if targets_yaml is None:\n return job_configs\n\n config_yaml = 'config.yaml'\n config_override = args.get('config')\n if is_not_null(config_override):\n config_yaml = config_override\n\n targets = load_yaml(targets_yaml)\n nsims_job = args['nsims_job']\n first_seed = args['seed']\n nsims = args['nsims']\n last_seed = first_seed + nsims\n\n base_config = dict(sim_profile=args['sim_profile'],\n roi_baseline=args['roi_baseline'],\n non_null_src=args['non_null_src'],\n sim=sim)\n\n for target_name, target_list in targets.items():\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n fullpath=True)\n simdir = NAME_FACTORY.sim_targetdir(**name_keys)\n config_path = os.path.join(simdir, config_yaml)\n\n job_config = base_config.copy()\n job_config.update(dict(config=config_path,\n profiles=target_list))\n\n current_seed = first_seed\n while current_seed < last_seed:\n fullkey = \"%s_%06i\" % (target_name, current_seed)\n logfile = make_nfs_path(os.path.join(simdir, \"%s_%s_%06i.log\" % (self.linkname, \n target_name, current_seed)))\n if nsims_job <= 0 or current_seed + nsims_job >= last_seed:\n nsims_current = last_seed - current_seed\n else:\n nsims_current = nsims_job\n 
job_config.update(dict(seed=current_seed,\n nsims=nsims_current,\n logfile=logfile))\n job_configs[fullkey] = job_config.copy()\n current_seed += nsims_current\n\n return job_configs", "response": "Build the job configurations for the given target file."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_branches(aliases):\n\n ignore = ['pow', 'log10', 'sqrt', 'max']\n branches = []\n for k, v in aliases.items():\n\n tokens = re.sub('[\\(\\)\\+\\*\\/\\,\\=\\<\\>\\&\\!\\-\\|]', ' ', v).split()\n\n for t in tokens:\n\n if bool(re.search(r'^\\d', t)) or len(t) <= 3:\n continue\n\n if bool(re.search(r'[a-zA-Z]', t)) and t not in ignore:\n branches += [t]\n\n return list(set(branches))", "response": "Get unique branch names from an alias dictionary."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef load_friend_chains(chain, friend_chains, txt, nfiles=None):\n\n if re.search('.root?', txt) is not None:\n c = ROOT.TChain(chain.GetName())\n c.SetDirectory(0)\n c.Add(txt)\n friend_chains.append(c)\n chain.AddFriend(c, rand_str())\n return\n\n files = np.loadtxt(txt, unpack=True, dtype='str')\n if files.ndim == 0:\n files = np.array([files])\n if nfiles is not None:\n files = files[:nfiles]\n print(\"Loading %i files...\" % len(files))\n\n c = ROOT.TChain(chain.GetName())\n c.SetDirectory(0)\n for f in files:\n c.Add(f)\n friend_chains.append(c)\n\n chain.AddFriend(c, rand_str())\n return", "response": "Load a list of trees from a file and add them as friends to the\n chain."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef find_and_read_ebins(hdulist):\n from fermipy import utils\n ebins = None\n if 'ENERGIES' in hdulist:\n hdu = hdulist['ENERGIES']\n ectr = hdu.data.field(hdu.columns[0].name)\n ebins = np.exp(utils.center_to_edge(np.log(ectr)))\n elif 'EBOUNDS' in hdulist:\n hdu = hdulist['EBOUNDS']\n emin = 
hdu.data.field('E_MIN') / 1E3\n emax = hdu.data.field('E_MAX') / 1E3\n ebins = np.append(emin, emax[-1])\n return ebins", "response": "Reads and returns the energy bin edges of the current resource."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreading and returns the energy bin edges from a FITs HDU", "response": "def read_energy_bounds(hdu):\n \"\"\" Reads and returns the energy bin edges from a FITs HDU\n \"\"\"\n nebins = len(hdu.data)\n ebin_edges = np.ndarray((nebins + 1))\n try:\n ebin_edges[0:-1] = np.log10(hdu.data.field(\"E_MIN\")) - 3.\n ebin_edges[-1] = np.log10(hdu.data.field(\"E_MAX\")[-1]) - 3.\n except KeyError:\n ebin_edges[0:-1] = np.log10(hdu.data.field(\"energy_MIN\"))\n ebin_edges[-1] = np.log10(hdu.data.field(\"energy_MAX\")[-1])\n return ebin_edges"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef read_spectral_data(hdu):\n ebins = read_energy_bounds(hdu)\n fluxes = np.ndarray((len(ebins)))\n try:\n fluxes[0:-1] = hdu.data.field(\"E_MIN_FL\")\n fluxes[-1] = hdu.data.field(\"E_MAX_FL\")[-1]\n npreds = hdu.data.field(\"NPRED\")\n except:\n fluxes = np.ones((len(ebins)))\n npreds = np.ones((len(ebins)))\n return ebins, fluxes, npreds", "response": "Reads and returns the energy bin edges fluxes and npreds from\n a FITs HDU\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nbuild and returns a FITs HDU with the energy values", "response": "def make_energies_hdu(energy_vals, extname=\"ENERGIES\"):\n \"\"\" Builds and returns a FITs HDU with the energy values\n\n extname : The HDU extension name \n \"\"\"\n cols = [fits.Column(\"Energy\", \"D\", unit='MeV', array=energy_vals)]\n hdu = fits.BinTableHDU.from_columns(cols, name=extname)\n return hdu"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nread a WCS or HPX projection from a FITS file.", "response": "def read_projection_from_fits(fitsfile, 
extname=None):\n \"\"\"\n Load a WCS or HPX projection.\n \"\"\"\n f = fits.open(fitsfile)\n nhdu = len(f)\n # Try and get the energy bounds\n try:\n ebins = find_and_read_ebins(f)\n except:\n ebins = None\n\n if extname is None:\n # If there is an image in the Primary HDU we can return a WCS-based\n # projection\n if f[0].header['NAXIS'] != 0:\n proj = WCS(f[0].header)\n return proj, f, f[0]\n else:\n if f[extname].header['XTENSION'] == 'IMAGE':\n proj = WCS(f[extname].header)\n return proj, f, f[extname]\n elif extname in ['SKYMAP', 'SKYMAP2']:\n proj = HPX.create_from_hdu(f[extname], ebins)\n return proj, f, f[extname]\n elif f[extname].header['XTENSION'] == 'BINTABLE':\n try:\n if f[extname].header['PIXTYPE'] == 'HEALPIX':\n proj = HPX.create_from_hdu(f[extname], ebins)\n return proj, f, f[extname]\n except:\n pass\n return None, f, None\n\n # Loop on HDU and look for either an image or a table with HEALPix data\n for i in range(1, nhdu):\n # if there is an image we can return a WCS-based projection\n if f[i].header['XTENSION'] == 'IMAGE':\n proj = WCS(f[i].header)\n return proj, f, f[i]\n elif f[i].header['XTENSION'] == 'BINTABLE':\n if f[i].name in ['SKYMAP', 'SKYMAP2']:\n proj = HPX.create_from_hdu(f[i], ebins)\n return proj, f, f[i]\n try:\n if f[i].header['PIXTYPE'] == 'HEALPIX':\n proj = HPX.create_from_hdu(f[i], ebins)\n return proj, f, f[i]\n except:\n pass\n\n return None, f, None"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nwriting a list of astropy. table. 
Table objects to a single FITS file.", "response": "def write_tables_to_fits(filepath, tablelist, clobber=False,\n namelist=None, cardslist=None, hdu_list=None):\n \"\"\"\n Write some astropy.table.Table objects to a single fits file\n \"\"\"\n outhdulist = [fits.PrimaryHDU()]\n rmlist = []\n for i, table in enumerate(tablelist):\n ft_name = \"%s._%i\" % (filepath, i)\n rmlist.append(ft_name)\n try:\n os.unlink(ft_name)\n except:\n pass\n table.write(ft_name, format=\"fits\")\n ft_in = fits.open(ft_name)\n if namelist:\n ft_in[1].name = namelist[i]\n if cardslist:\n for k, v in cardslist[i].items():\n ft_in[1].header[k] = v\n ft_in[1].update()\n outhdulist += [ft_in[1]]\n\n if hdu_list is not None:\n for h in hdu_list:\n outhdulist.append(h)\n\n fits.HDUList(outhdulist).writeto(filepath, overwrite=clobber)\n for rm in rmlist:\n os.unlink(rm)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef update_docstring(docstring, options_dict):\n\n options_str = []\n for i, (k, v) in enumerate(sorted(options_dict.items())):\n\n option_str = ''\n if i == 0:\n option_str += '%s : %s\\n' % (k, v[2].__name__)\n else:\n option_str += ' ' * 8 + '%s : %s\\n' % (k, v[2].__name__)\n\n option_doc = v[1]\n option_doc += ' (default : %s)' % v[0]\n option_doc = textwrap.wrap(option_doc, 72 - 12)\n option_str += ' ' * 12 + ('\\n' + ' ' * 12).join(option_doc)\n options_str += [option_str]\n\n options_str = '\\n\\n'.join(options_str)\n\n return docstring.format(options=options_str)", "response": "Update a method docstring by inserting option docstrings defined in the options dictionary."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef convolve_map(m, k, cpix, threshold=0.001, imin=0, imax=None, wmap=None):\n islice = slice(imin, imax)\n\n o = np.zeros(m[islice, ...].shape)\n ix = int(cpix[0])\n iy = int(cpix[1])\n\n # Loop over energy\n for i in 
range(m[islice, ...].shape[0]):\n\n ks = k[islice, ...][i, ...]\n ms = m[islice, ...][i, ...]\n\n mx = ks[ix, :] > ks[ix, iy] * threshold\n my = ks[:, iy] > ks[ix, iy] * threshold\n\n nx = int(max(3, np.round(np.sum(mx) / 2.)))\n ny = int(max(3, np.round(np.sum(my) / 2.)))\n\n # Ensure that there is an odd number of pixels in the kernel\n # array\n if ix + nx + 1 >= ms.shape[0] or ix - nx < 0:\n nx -= 1\n ny -= 1\n\n sx = slice(ix - nx, ix + nx + 1)\n sy = slice(iy - ny, iy + ny + 1)\n\n ks = ks[sx, sy]\n\n# origin = [0, 0]\n# if ks.shape[0] % 2 == 0: origin[0] += 1\n# if ks.shape[1] % 2 == 0: origin[1] += 1\n# o[i,...] = ndimage.convolve(ms, ks, mode='constant',\n# origin=origin, cval=0.0)\n\n o[i, ...] = scipy.signal.fftconvolve(ms, ks, mode='same')\n if wmap is not None:\n o[i, ...] *= wmap[islice, ...][i, ...]\n\n return o", "response": "This function performs an energy - dependent convolution on a sequence of 3 - D spatial maps."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nperforming an energy - dependent convolution on a sequence of HEALPix maps.", "response": "def convolve_map_hpx_gauss(m, sigmas, imin=0, imax=None, wmap=None):\n \"\"\"\n Perform an energy-dependent convolution on a sequence of 2-D spatial maps.\n\n Parameters\n ----------\n\n m : `HpxMap`\n 2-D map containing a sequence of 1-D HEALPix maps. First\n dimension should be energy.\n\n sigmas : `~numpy.ndarray`\n 1-D map containing a sequence gaussian widths for smoothing\n\n imin : int\n Minimum index in energy dimension.\n\n imax : int\n Maximum index in energy dimension.\n\n wmap : `~numpy.ndarray`\n 2-D map containing a sequence of 1-D HEALPix maps of weights. First\n dimension should be energy. 
This map should have the same dimension as m.\n\n \"\"\"\n islice = slice(imin, imax)\n\n o = np.zeros(m.data.shape)\n\n nside = m.geom.nside\n nest = m.geom.nest\n\n # Loop over energy\n for i, ms in enumerate(m.data[islice, ...]):\n sigma = sigmas[islice][i]\n # Need to be in RING scheme\n if nest:\n ms = hp.pixelfunc.reorder(ms, n2r=True)\n\n o[islice, ...][i] = hp.sphtfunc.smoothing(ms, sigma=sigma)\n if nest:\n o[islice, ...][i] = hp.pixelfunc.reorder(\n o[islice, ...][i], r2n=True)\n if wmap is not None:\n o[islice, ...][i] *= wmap.data[islice, ...][i]\n\n return HpxNDMap(m.geom, o)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_source_kernel(gta, name, kernel=None):\n\n sm = []\n zs = 0\n for c in gta.components:\n z = c.model_counts_map(name).data.astype('float')\n if kernel is not None:\n shape = (z.shape[0],) + kernel.shape\n z = np.apply_over_axes(np.sum, z, axes=[1, 2]) * np.ones(\n shape) * kernel[np.newaxis, :, :]\n zs += np.sum(z)\n else:\n zs += np.sum(z)\n\n sm.append(z)\n\n sm2 = 0\n for i, m in enumerate(sm):\n sm[i] /= zs\n sm2 += np.sum(sm[i] ** 2)\n\n for i, m in enumerate(sm):\n sm[i] /= sm2\n\n return sm", "response": "Get the PDF for the given source."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef residmap(self, prefix='', **kwargs):\n timer = Timer.create(start=True)\n self.logger.info('Generating residual maps')\n\n schema = ConfigSchema(self.defaults['residmap'])\n\n config = schema.create_config(self.config['residmap'], **kwargs)\n\n # Defining default properties of test source model\n config['model'].setdefault('Index', 2.0)\n config['model'].setdefault('SpectrumType', 'PowerLaw')\n config['model'].setdefault('SpatialModel', 'PointSource')\n config['model'].setdefault('Prefactor', 1E-13)\n\n o = self._make_residual_map(prefix, **config)\n\n if config['make_plots']:\n plotter = 
plotting.AnalysisPlotter(self.config['plotting'],\n fileio=self.config['fileio'],\n logging=self.config['logging'])\n\n plotter.make_residmap_plots(o, self.roi)\n\n self.logger.info('Finished residual maps')\n\n outfile = utils.format_filename(self.workdir, 'residmap',\n prefix=[o['name']])\n\n if config['write_fits']:\n o['file'] = os.path.basename(outfile) + '.fits'\n self._make_residmap_fits(o, outfile + '.fits')\n\n if config['write_npy']:\n np.save(outfile + '.npy', o)\n\n self.logger.info('Execution time: %.2f s', timer.elapsed_time)\n return o", "response": "Generate 2 - D spatial residual maps for the current ROI and the convolution kernel defined with the model\n argument."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef create(appname, **kwargs):\n\n if appname in LinkFactory._class_dict:\n return LinkFactory._class_dict[appname].create(**kwargs)\n else:\n raise KeyError(\n \"Could not create object associated to app %s\" % appname)", "response": "Create a Link of a particular class using the kwargs as options"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nmapping from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n comp_file = args.get('comp', None)\n datafile = args.get('data', None)\n if is_null(comp_file):\n return\n if is_null(datafile):\n return\n\n NAME_FACTORY.update_base_dict(datafile)\n\n outdir = args.get('outdir', None)\n outkey = args.get('outkey', None)\n ft1file = args['ft1file']\n\n if is_null(outdir) or is_null(outkey):\n return\n pfiles = os.path.join(outdir, outkey)\n\n self.comp_dict = yaml.safe_load(open(comp_file))\n coordsys = self.comp_dict.pop('coordsys')\n full_out_dir = make_nfs_path(os.path.join(outdir, outkey))\n\n for key_e, comp_e in 
sorted(self.comp_dict.items()):\n emin = math.pow(10., comp_e['log_emin'])\n emax = math.pow(10., comp_e['log_emax'])\n enumbins = comp_e['enumbins']\n zmax = comp_e['zmax']\n zcut = \"zmax%i\" % comp_e['zmax']\n evclassstr = NAME_FACTORY.base_dict['evclass']\n kwargs_select = dict(zcut=zcut,\n ebin=key_e,\n psftype='ALL',\n coordsys=coordsys,\n mktime='none')\n selectfile_energy = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_select))\n linkname = 'select-energy-%s-%s' % (key_e, zcut)\n self._set_link(linkname, Gtlink_select,\n infile=ft1file,\n outfile=selectfile_energy,\n zmax=zmax,\n emin=emin,\n emax=emax,\n evclass=NAME_FACTORY.evclassmask(evclassstr),\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname))\n\n if 'evtclasses' in comp_e:\n evtclasslist_vals = comp_e['evtclasses']\n else:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n\n for evtclassval in evtclasslist_vals:\n for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):\n linkname_select = 'select-type-%s-%s-%s-%s' % (\n key_e, zcut, evtclassval, psf_type)\n linkname_bin = 'bin-%s-%s-%s-%s' % (key_e, zcut, evtclassval, psf_type)\n hpx_order = psf_dict['hpx_order']\n kwargs_bin = kwargs_select.copy()\n kwargs_bin['psftype'] = psf_type\n selectfile_psf = make_full_path(\n outdir, outkey, NAME_FACTORY.select(**kwargs_bin))\n binfile = make_full_path(outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))\n self._set_link(linkname_select, Gtlink_select,\n infile=selectfile_energy,\n outfile=selectfile_psf,\n zmax=zmax,\n emin=emin,\n emax=emax,\n evtype=EVT_TYPE_DICT[psf_type],\n evclass=NAME_FACTORY.evclassmask(evtclassval),\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % linkname_select))\n\n self._set_link(linkname_bin, Gtlink_bin,\n coordsys=coordsys,\n hpx_order=hpx_order,\n evfile=selectfile_psf,\n outfile=binfile,\n emin=emin,\n emax=emax,\n enumbins=enumbins,\n pfiles=pfiles,\n logfile=os.path.join(full_out_dir, \"%s.log\" % 
linkname_bin))"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n data = args.get('data')\n comp = args.get('comp')\n ft1file = args.get('ft1file')\n scratch = args.get('scratch', None)\n dry_run = args.get('dry_run', None)\n\n self._set_link('split-and-bin', SplitAndBin_SG,\n comp=comp, data=data,\n hpx_order_max=args.get('hpx_order_ccube', 9),\n ft1file=ft1file,\n scratch=scratch,\n dry_run=dry_run)\n\n self._set_link('coadd-split', CoaddSplit_SG,\n comp=comp, data=data,\n ft1file=ft1file)\n\n self._set_link('expcube2', Gtexpcube2_SG,\n comp=comp, data=data,\n hpx_order_max=args.get('hpx_order_expcube', 5),\n dry_run=dry_run)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef _replace_none(self, aDict):\n for k, v in aDict.items():\n if v is None:\n aDict[k] = 'none'", "response": "Replace all None values in a dict with none"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nget the name of IFRs associated with a particular dataset", "response": "def irfs(self, **kwargs):\n \"\"\" Get the name of IFRs associted with a particular dataset\n \"\"\"\n dsval = kwargs.get('dataset', self.dataset(**kwargs))\n tokens = dsval.split('_')\n irf_name = \"%s_%s_%s\" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])],\n EVCLASS_NAME_DICTIONARY[tokens[3]],\n kwargs.get('irf_ver'))\n return irf_name"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn a key that specifies the data selection", "response": "def dataset(self, **kwargs):\n \"\"\" Return a key that specifies the data selection\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n 
self._replace_none(kwargs_copy)\n try:\n return NameFactory.dataset_format.format(**kwargs_copy)\n except KeyError:\n return None"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns a key that specifies data the sub - selection", "response": "def component(self, **kwargs):\n \"\"\" Return a key that specifies data the sub-selection\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n try:\n return NameFactory.component_format.format(**kwargs_copy)\n except KeyError:\n return None"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn a key that specifies the name and version of a source or component", "response": "def sourcekey(self, **kwargs):\n \"\"\" Return a key that specifies the name and version of a source or component\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n try:\n return NameFactory.sourcekey_format.format(**kwargs_copy)\n except KeyError:\n return None"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns the sourcekey for galprop input maps", "response": "def galprop_ringkey(self, **kwargs):\n \"\"\" return the sourcekey for galprop input maps : specifies the component and ring\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n try:\n return NameFactory.galprop_ringkey_format.format(**kwargs_copy)\n except KeyError:\n return None"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef galprop_sourcekey(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n try:\n return NameFactory.galprop_sourcekey_format.format(**kwargs_copy)\n except KeyError:\n return None", "response": "return the sourcekey for 
merged galprop maps"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn the sourcekey for merged sets of point sources", "response": "def merged_sourcekey(self, **kwargs):\n \"\"\" return the sourcekey for merged sets of point sources :\n specifies the catalog and merging rule\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n try:\n return NameFactory.merged_sourcekey_format.format(**kwargs_copy)\n except KeyError:\n return None"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn the file name for Galprop input gasmaps", "response": "def galprop_gasmap(self, **kwargs):\n \"\"\" return the file name for Galprop input gasmaps\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.galprop_gasmap_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the file name for Galprop merged gasmaps", "response": "def merged_gasmap(self, **kwargs):\n \"\"\" return the file name for Galprop merged gasmaps\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.merged_gasmap_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef diffuse_template(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.diffuse_template_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", 
"response": "return the file name for other diffuse map templates\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef spectral_template(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n localpath = NameFactory.spectral_template_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the file name for spectral templates\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef srcmdl_xml(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n localpath = NameFactory.srcmdl_xml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the file name for source model xml files\n "} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the file name for source model xml files of nested sources", "response": "def nested_srcmdl_xml(self, **kwargs):\n \"\"\" return the file name for source model xml files of nested sources\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.nested_srcmdl_xml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef ft1file(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.ft1file_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n 
return localpath", "response": "return the name of the input ft1 file list\n "} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef ft2file(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['data_time'] = kwargs.get(\n 'data_time', self.dataset(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.ft2file_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of the input ft2 file list\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the name of a livetime cube file", "response": "def ltcube(self, **kwargs):\n \"\"\" return the name of a livetime cube file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n localpath = NameFactory.ltcube_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn the name of a selected events ft1file", "response": "def select(self, **kwargs):\n \"\"\" return the name of a selected events ft1file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.select_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef mktime(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n 
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.mktime_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a selected events ft1file\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef ccube(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n localpath = NameFactory.ccube_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a counts cube file"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nreturning the name of a binned exposure cube file", "response": "def bexpcube(self, **kwargs):\n \"\"\" return the name of a binned exposure cube file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.bexpcube_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn the name of a source map file", "response": "def srcmaps(self, **kwargs):\n \"\"\" return the name of a source map file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', 
self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.srcmaps_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn the name of a model cube file", "response": "def mcube(self, **kwargs):\n \"\"\" return the name of a model cube file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.mcube_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef ltcube_sun(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.ltcubesun_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a livetime cube file"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef ltcube_moon(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.ltcubemoon_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a livetime cube file"} 
{"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns the name of a binned exposure cube file", "response": "def bexpcube_sun(self, **kwargs):\n \"\"\" return the name of a binned exposure cube file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.bexpcubesun_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreturning the name of a binned exposure cube file", "response": "def bexpcube_moon(self, **kwargs):\n \"\"\" return the name of a binned exposure cube file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.bexpcubemoon_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef angprofile(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.angprofile_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the file name for sun or moon angular profiles\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the file name for sun or moon template files", "response": "def template_sunmoon(self, 
**kwargs):\n \"\"\" return the file name for sun or moon template files\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef residual_cr(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n localpath = NameFactory.residual_cr_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "Return the name of the residual CR analysis output files"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef galprop_rings_yaml(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.galprop_rings_yaml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a galprop rings merging yaml file"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef catalog_split_yaml(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.catalog_split_yaml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return 
self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a catalog split yaml file"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef model_yaml(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.model_yaml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a model yaml file"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef merged_srcmaps(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.merged_srcmaps_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a source map file that is merged with another source map file"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef master_srcmdl_xml(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n localpath = NameFactory.master_srcmdl_xml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath", "response": "return the name of a master source model file"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the name of a source model file", "response": "def comp_srcmdl_xml(self, **kwargs):\n \"\"\" return the name of a source model file\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n 
kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n localpath = NameFactory.comp_srcmdl_xml_format.format(**kwargs_copy)\n if kwargs.get('fullpath', False):\n return self.fullpath(localpath=localpath)\n return localpath"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef fullpath(self, **kwargs):\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n self._replace_none(kwargs_copy) \n return NameFactory.fullpath_format.format(**kwargs_copy)", "response": "Return a full path name for a given file\n "} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a generic filename for a given dataset and component", "response": "def generic(self, input_string, **kwargs):\n \"\"\" return a generic filename for a given dataset and component\n \"\"\"\n kwargs_copy = self.base_dict.copy()\n kwargs_copy.update(**kwargs)\n kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))\n kwargs_copy['component'] = kwargs.get(\n 'component', self.component(**kwargs))\n self._replace_none(kwargs_copy) \n return input_string.format(**kwargs_copy)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nmaking a dictionary of filenames for various types", "response": "def make_filenames(self, **kwargs):\n \"\"\" Make a dictionary of filenames for various types\n \"\"\"\n out_dict = dict(ft1file=self.ft1file(**kwargs),\n ltcube=self.ltcube(**kwargs),\n ccube=self.ccube(**kwargs),\n bexpcube=self.bexpcube(**kwargs),\n srcmaps=self.srcmaps(**kwargs),\n mcube=self.mcube(**kwargs))\n return out_dict"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def 
_map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n data = args.get('data')\n comp = args.get('comp')\n library = args.get('library')\n dry_run = args.get('dry_run', False)\n\n self._set_link('sum-rings', SumRings_SG,\n library=library,\n outdir=args['outdir'],\n dry_run=dry_run)\n\n self._set_link('srcmaps-diffuse', SrcmapsDiffuse_SG,\n comp=comp, data=data,\n library=library,\n make_xml=args['make_xml'],\n dry_run=dry_run)\n\n self._set_link('vstack-diffuse', Vstack_SG,\n comp=comp, data=data,\n library=library,\n dry_run=dry_run)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n data = args.get('data')\n comp = args.get('comp')\n library = args.get('library')\n dry_run = args.get('dry_run', False)\n\n self._set_link('srcmaps-catalog', SrcmapsCatalog_SG,\n comp=comp, data=data,\n library=library,\n nsrc=args.get('nsrc', 500),\n dry_run=dry_run)\n\n self._set_link('gather-srcmaps', GatherSrcmaps_SG,\n comp=comp, data=data,\n library=library,\n dry_run=dry_run)\n \n self._set_link('merge-srcmaps', MergeSrcmaps_SG,\n comp=comp, data=data,\n library=library,\n dry_run=dry_run)"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nmap from the top - level arguments to the arguments provided to the indiviudal links", "response": "def _map_arguments(self, args):\n \"\"\"Map from the top-level arguments to the arguments provided to\n the indiviudal links \"\"\"\n config_yaml = args['config']\n config_dict = load_yaml(config_yaml)\n\n dry_run = args.get('dry_run', False)\n\n data = config_dict.get('data')\n comp = config_dict.get('comp')\n library = config_dict.get('library')\n models = 
config_dict.get('models')\n scratch = config_dict.get('scratch')\n\n self._set_link('prepare', SplitAndBinChain,\n comp=comp, data=data,\n ft1file=config_dict.get('ft1file'),\n hpx_order_ccube=config_dict.get('hpx_order_ccube'),\n hpx_order_expcube=config_dict.get('hpx_order_expcube'),\n scratch=scratch,\n dry_run=dry_run)\n\n self._set_link('diffuse-comp', DiffuseCompChain,\n comp=comp, data=data,\n library=library,\n make_xml=config_dict.get('make_diffuse_comp_xml', False),\n outdir=config_dict.get('merged_gasmap_dir', 'merged_gasmap'),\n dry_run=dry_run)\n\n self._set_link('catalog-comp', CatalogCompChain,\n comp=comp, data=data,\n library=library,\n make_xml=config_dict.get('make_catalog_comp_xml', False),\n nsrc=config_dict.get('catalog_nsrc', 500),\n dry_run=dry_run)\n \n self._set_link('assemble-model', AssembleModelChain,\n comp=comp, data=data,\n library=library,\n models=models,\n hpx_order=config_dict.get('hpx_order_fitting'),\n dry_run=dry_run)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None):\n\n if fn is None:\n fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type))\n\n if spectrum_type == 'PiecewisePowerLaw': \n build_piecewise_powerlaw(fn, spectral_pars)\n\n for k, v in spectral_pars.items():\n\n v.setdefault('scale', 1.0)\n v.setdefault('min', v['value'] * 1E-3)\n v.setdefault('max', v['value'] * 1E3)\n\n par = fn.getParam(str(k))\n\n vmin = min(float(v['value']), float(v['min']))\n vmax = max(float(v['value']), float(v['max']))\n\n par.setValue(float(v['value']))\n par.setBounds(vmin, vmax)\n par.setScale(float(v['scale']))\n\n if 'free' in v and int(v['free']) != 0:\n par.setFree(True)\n else:\n par.setFree(False)\n fn.setParam(par)\n\n return fn", "response": "Create a Function object from a dictionary of spectral parameters."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following 
Python 3 code\ndef gtlike_spectrum_to_dict(spectrum):\n parameters = pyLike.ParameterVector()\n spectrum.getParams(parameters)\n d = dict(spectrum_type=spectrum.genericName())\n for p in parameters:\n\n pname = p.getName()\n pval = p.getTrueValue()\n perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan\n d[pname] = np.array([pval, perr])\n\n if d['spectrum_type'] == 'FileFunction':\n ff = pyLike.FileFunction_cast(spectrum)\n d['file'] = ff.filename()\n return d", "response": "Convert a pyLikelihood object to a python dictionary which can\n be easily saved to a file."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nconverts a pyLikelihood object to a python dictionary which can be easily saved to a file.", "response": "def gtlike_spectrum_to_vectors(spectrum):\n \"\"\" Convert a pyLikelihood object to a python dictionary which can\n be easily saved to a file.\"\"\"\n\n\n parameters = pyLike.ParameterVector()\n spectrum.getParams(parameters)\n\n npar = max(parameters.size(), 10)\n o = {'param_names': np.zeros(npar, dtype='S32'),\n 'param_values': np.empty(npar, dtype=float) * np.nan,\n 'param_errors': np.empty(npar, dtype=float) * np.nan,\n }\n\n for i, p in enumerate(parameters):\n o['param_names'][i] = p.getName()\n o['param_values'][i] = p.getTrueValue()\n perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan\n o['param_errors'][i] = perr\n\n return o"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_function_pars(fn):\n\n pars = []\n par_names = pyLike.StringVector()\n fn.getParamNames(par_names)\n\n for pname in par_names:\n\n par = fn.getParam(pname)\n bounds = par.getBounds()\n perr = par.error() if par.isFree() else np.nan\n pars += [dict(name=pname,\n value=par.getValue(),\n error=perr,\n min=bounds[0],\n max=bounds[1],\n free=par.isFree(),\n scale=par.getScale())]\n\n return pars", "response": "Extract the parameters of a 
pyLikelihood function object\n ."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_priors(like):\n\n npar = len(like.params())\n\n vals = np.ones(npar)\n errs = np.ones(npar)\n has_prior = np.array([False] * npar)\n\n for i, p in enumerate(like.params()):\n\n prior = like[i].log_prior()\n\n if prior is None:\n continue\n\n par_names = pyLike.StringVector()\n prior.getParamNames(par_names)\n\n if not 'Mean' in par_names:\n raise Exception('Failed to find Mean in prior parameters.')\n\n if not 'Sigma' in par_names:\n raise Exception('Failed to find Sigma in prior parameters.')\n\n for t in par_names:\n\n if t == 'Mean':\n vals[i] = prior.parameter(t).getValue()\n\n if t == 'Sigma':\n errs[i] = prior.parameter(t).getValue()\n\n has_prior[i] = True\n\n return vals, errs, has_prior", "response": "Extract priors from a likelihood object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_source_pars(src):\n\n fnmap = src.getSrcFuncs()\n\n keys = fnmap.keys()\n\n if 'Position' in keys:\n ppars = get_function_pars(src.getSrcFuncs()[str('Position')])\n elif 'SpatialDist' in keys:\n ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')])\n else:\n raise Exception('Failed to extract spatial parameters.')\n\n fn = src.getSrcFuncs()[str('Spectrum')]\n spars = get_function_pars(fn)\n\n for i, p in enumerate(ppars):\n ppars[i]['is_norm'] = False\n\n for i, p in enumerate(spars):\n\n if fn.normPar().getName() == p['name']:\n spars[i]['is_norm'] = True\n else:\n spars[i]['is_norm'] = False\n\n return spars, ppars", "response": "Extract the parameters associated with a pyLikelihood Source object."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncounts the number of free parameters in the active model.", "response": "def nFreeParams(self):\n \"\"\"Count the number of free parameters in the 
active model.\"\"\"\n nF = 0\n pars = self.params()\n for par in pars:\n if par.isFree():\n nF += 1\n return nF"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\ncompute the TS value for a source indicated by srcName.", "response": "def Ts2(self, srcName, reoptimize=False, approx=True,\n tol=None, MaxIterations=10, verbosity=0):\n \"\"\"Computes the TS value for a source indicated by \"srcName.\"\n\n If \"reoptimize=True\" is selected this function will reoptimize\n the model up to \"MaxIterations\" given the tolerance \"tol\"\n (default is the tolerance selected for the overall fit). If\n \"appox=True\" is selected (the default) it will renormalize the\n model (see _renorm).\n \"\"\"\n\n saved_state = LikelihoodState(self)\n if verbosity > 0:\n print(\"*** Start Ts_dl ***\")\n source_attributes = self.getExtraSourceAttributes()\n self.logLike.syncParams()\n src = self.logLike.getSource(srcName)\n self._ts_src = src\n freeParams = pyLike.DoubleVector()\n self.logLike.getFreeParamValues(freeParams)\n logLike1 = self.logLike.value()\n self.scaleSource(srcName, 1E-10)\n logLike0 = self.logLike.value()\n if tol is None:\n tol = self.tol\n if reoptimize:\n if verbosity > 0:\n print(\"** Do reoptimize\")\n optFactory = pyLike.OptimizerFactory_instance()\n myOpt = optFactory.create(self.optimizer, self.logLike)\n Niter = 1\n while Niter <= MaxIterations:\n try:\n myOpt.find_min(0, tol)\n break\n except RuntimeError as e:\n print(e)\n if verbosity > 0:\n print(\"** Iteration :\", Niter)\n Niter += 1\n else:\n if approx:\n try:\n self._renorm()\n except ZeroDivisionError:\n pass\n self.logLike.syncParams()\n logLike0 = max(self.logLike.value(), logLike0)\n Ts_value = 2 * (logLike1 - logLike0)\n self.scaleSource(srcName, 1E10)\n\n self.logLike.setFreeParamValues(freeParams)\n self.model = SourceModel(self.logLike)\n for src in source_attributes:\n self.model[src].__dict__.update(source_attributes[src])\n saved_state.restore()\n 
self.logLike.value()\n return Ts_value"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nhook to inster the name of a logfile into the input config", "response": "def _make_scatter_logfile_name(cls, key, linkname, job_config):\n \"\"\"Hook to inster the name of a logfile into the input config \"\"\"\n logfile = job_config.get('logfile', \"%s_%s_%s.log\" %\n (cls.default_prefix_logfile, linkname, key))\n job_config['logfile'] = logfile"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create(cls, **kwargs):\n linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default)\n # Don't use setdefault b/c we don't want to build a JobArchive\n # Unless it is needed\n job_archive = kwargs.get('job_archive', None)\n if job_archive is None:\n job_archive = JobArchive.build_temp_job_archive()\n kwargs.setdefault('job_archive', job_archive)\n kwargs_client = dict(linkname=linkname,\n link_prefix=kwargs.get('link_prefix', ''),\n file_stage=kwargs.get('file_stage', None),\n job_archive=job_archive)\n link = cls.clientclass.create(**kwargs_client)\n sg = cls(link, **kwargs)\n return sg", "response": "Build and return a ScatterGather object"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nbuild a dictionary of JobDetails objects for the internal Link.", "response": "def _build_job_dict(self):\n \"\"\"Build a dictionary of `JobDetails` objects for the internal `Link`\"\"\"\n if self.args['dry_run']:\n status = JobStatus.unknown\n else:\n status = JobStatus.not_ready\n\n base_config = self.scatter_link.args\n\n for jobkey, job_config in sorted(self._job_configs.items()):\n full_job_config = base_config.copy()\n full_job_config.update(job_config)\n ScatterGather._make_scatter_logfile_name(jobkey,\n self.linkname, full_job_config)\n logfile = job_config.get('logfile')\n self._scatter_link._register_job(key=jobkey,\n job_config=full_job_config,\n 
logfile=logfile,\n status=status)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _invoke(self, argv, stream=sys.stdout, resubmit_failed=False):\n args = self._run_argparser(argv)\n\n if args.action not in ACTIONS:\n sys.stderr.write(\n \"Unrecognized action %s, options are %s\\n\" % (args.action, ACTIONS))\n\n if args.action == 'skip':\n return JobStatus.no_job\n elif args.action in ['run', 'resubmit', 'check_status', 'config']:\n self._job_configs = self.build_job_configs(args.__dict__)\n\n self._interface._dry_run = args.dry_run\n\n if args.action == 'run':\n status_vect = self.run_jobs(stream, resubmit_failed=resubmit_failed)\n elif args.action == 'resubmit':\n status_vect = self.resubmit(stream, resubmit_failed=resubmit_failed)\n elif args.action == 'check_status':\n self._build_job_dict()\n status_vect = self.check_status(stream)\n elif args.action == 'config':\n self._build_job_dict()\n status_vect = JobStatusVector()\n status_vect[JobStatus.done] += 1\n\n return status_vect", "response": "Invoke the object to preform a particular action."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nupdate the arguments used to invoke the application.", "response": "def update_args(self, override_args):\n \"\"\"Update the arguments used to invoke the application\n\n Note that this will also update the dictionary of input and output files\n\n Parameters\n ----------\n\n override_args : dict\n dictionary of arguments to override the current values\n \"\"\"\n self.args = extract_arguments(override_args, self.args)\n self._job_configs = self.build_job_configs(self.args)\n if not self._scatter_link.jobs:\n self._build_job_dict()\n self._latch_file_info()"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef clear_jobs(self, recursive=True):\n if recursive:\n self._scatter_link.clear_jobs(recursive)\n self.jobs.clear()", "response": "Clear 
the self. jobs dictionary that contains information\n about jobs associated with this ScatterGather."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nreturn a dictionary with all the jobs in this link.", "response": "def get_jobs(self, recursive=True):\n \"\"\"Return a dictionary with all the jobs\n\n If recursive is True this will include jobs from all internal `Link`\n \"\"\"\n if recursive:\n ret_dict = self.jobs.copy()\n ret_dict.update(self._scatter_link.get_jobs(recursive))\n return ret_dict\n return self.jobs"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef check_status(self, stream=sys.stdout,\n check_once=False,\n fail_pending=False, fail_running=False,\n no_wait=False, do_print=True,\n write_status=False):\n \"\"\"Loop to check on the status of all the jobs in job dict.\n\n Parameters\n -----------\n stream : `file`\n Stream that this function will print to,\n Must have 'write' function.\n\n check_once : bool\n Check status once and exit loop.\n\n fail_pending : `bool`\n If True, consider pending jobs as failed\n\n fail_running : `bool`\n If True, consider running jobs as failed\n\n no_wait : bool\n Do not sleep before checking jobs.\n\n do_print : bool\n Print summary stats.\n\n write_status : bool\n Write the status the to log file.\n\n Returns\n -------\n status_vect : `JobStatusVector`\n Vector that summarize the number of jobs in various states.\n \"\"\"\n running = True\n first = True\n\n if not check_once:\n if stream != sys.stdout:\n sys.stdout.write('Checking status (%is): ' %\n self.args['job_check_sleep'])\n sys.stdout.flush()\n\n status_vect = JobStatusVector()\n while running:\n if first:\n first = False\n elif self.args['dry_run']:\n break\n elif no_wait:\n pass\n else:\n stream.write(\"Sleeping %.0f seconds between status checks\\n\" %\n self.args['job_check_sleep'])\n if stream != sys.stdout:\n sys.stdout.write('.')\n sys.stdout.flush()\n 
time.sleep(self.args['job_check_sleep'])\n\n status_vect = self._check_link_completion(self._scatter_link,\n fail_pending, fail_running)\n if self.args['check_status_once'] or check_once or no_wait:\n if do_print:\n self.print_update(stream, status_vect)\n break\n\n if self.args['print_update']:\n if do_print:\n self.print_update(stream, status_vect)\n\n if self._job_archive is not None:\n self._job_archive.write_table_file()\n\n n_total = status_vect.n_total\n n_done = status_vect.n_done\n n_failed = status_vect.n_failed\n if n_done + n_failed == n_total:\n running = False\n\n status = status_vect.get_status()\n if status in [JobStatus.failed, JobStatus.partial_failed]:\n if do_print:\n self.print_update(stream, status_vect)\n self.print_failed(stream)\n if write_status:\n self._write_status_to_log(status, stream)\n else:\n if write_status:\n self._write_status_to_log(0, stream)\n\n self._set_status_self(status=status) \n if not check_once:\n if stream != sys.stdout:\n sys.stdout.write(\"! 
%s\\n\" % (JOB_STATUS_STRINGS[status]))\n\n if self._job_archive is not None:\n self._job_archive.write_table_file()\n\n return status_vect", "response": "Loop to check the status of all the jobs in the job dict."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef run_jobs(self, stream=sys.stdout, resubmit_failed=False):\n self._build_job_dict()\n\n self._interface._dry_run = self.args['dry_run']\n scatter_status = self._interface.submit_jobs(self.scatter_link,\n job_archive=self._job_archive,\n stream=stream)\n if scatter_status == JobStatus.failed:\n return JobStatus.failed\n\n status_vect = self.check_status(stream, write_status=True)\n status = status_vect.get_status()\n\n if status == JobStatus.partial_failed:\n if resubmit_failed:\n sys.write(\"Resubmitting partially failed link %s\\n\" %\n self.full_linkname)\n status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=True)\n else:\n sys.stdout.write(\"NOT resubmitting partially failed link %s\\n\" %\n self.full_linkname)\n return status_vect", "response": "Function to dipatch jobs and collect results"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef resubmit(self, stream=sys.stdout, fail_running=False, resubmit_failed=False):\n self._build_job_dict()\n status_vect = self.check_status(stream, check_once=True, fail_pending=True,\n fail_running=fail_running)\n status = status_vect.get_status()\n if status == JobStatus.done:\n return status\n\n failed_jobs = self._scatter_link.get_failed_jobs(True, True)\n if failed_jobs:\n scatter_status = self._interface.submit_jobs(self._scatter_link, failed_jobs,\n job_archive=self._job_archive,\n stream=stream)\n if scatter_status == JobStatus.failed:\n return JobStatus.failed\n\n status_vect = self.check_status(stream, write_status=True)\n status = status_vect.get_status()\n\n if status == 
JobStatus.partial_failed:\n if resubmit_failed:\n sys.stdout.write(\"Resubmitting partially failed link %s\\n\" %\n self.full_linkname)\n status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=False)\n else:\n sys.stdout.write(\"NOT resubmitting partially failed link %s\\n\" %\n self.full_linkname)\n\n if self.args['dry_run']:\n return JobStatus.unknown\n return status_vect", "response": "Function to resubmit failed jobs and collect results"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef clean_jobs(self, recursive=False):\n self._interface.clean_jobs(self.scatter_link,\n clean_all=recursive)", "response": "Clean up all the jobs associated with this object."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef print_summary(self, stream=sys.stdout, indent=\"\", recurse_level=2):\n Link.print_summary(self, stream, indent, recurse_level)\n if recurse_level > 0:\n recurse_level -= 1\n indent += \" \"\n stream.write(\"\\n\")\n self._scatter_link.print_summary(stream, indent, recurse_level)", "response": "Print a summary of the activity done by this Link."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef print_update(self, stream=sys.stdout, job_stats=None):\n if job_stats is None:\n job_stats = JobStatusVector()\n job_det_list = []\n job_det_list += self._scatter_link.jobs.values()\n\n for job_dets in job_det_list:\n if job_dets.status == JobStatus.no_job:\n continue\n job_stats[job_dets.status] += 1\n\n stream.write(\"Status :\\n Total : %i\\n Unknown: %i\\n\" %\n (job_stats.n_total, job_stats[JobStatus.unknown]))\n stream.write(\" Not Ready: %i\\n Ready: %i\\n\" %\n (job_stats[JobStatus.not_ready], job_stats[JobStatus.ready]))\n stream.write(\" Pending: %i\\n Running: %i\\n\" %\n (job_stats[JobStatus.pending], job_stats[JobStatus.running]))\n 
stream.write(\" Done: %i\\n Failed: %i\\n\" %\n (job_stats[JobStatus.done], job_stats[JobStatus.failed]))", "response": "Print an update about the current number of jobs running"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef print_failed(self, stream=sys.stderr):\n for job_key, job_details in sorted(self.scatter_link.jobs.items()):\n if job_details.status == JobStatus.failed:\n stream.write(\"Failed job %s\\n log = %s\\n\" %\n (job_key, job_details.logfile))", "response": "Print the list of failed jobs"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nopens a numpy pickle file and read all the new sources into a dictionary", "response": "def read_sources_from_numpy_file(npfile):\n \"\"\" Open a numpy pickle file and read all the new sources into a dictionary\n\n Parameters\n ----------\n npfile : file name\n The input numpy pickle file\n\n Returns\n -------\n tab : `~astropy.table.Table`\n \"\"\"\n srcs = np.load(npfile).flat[0]['sources']\n\n roi = ROIModel()\n roi.load_sources(srcs.values())\n return roi.create_table()"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nopens a yaml file and read all the new sources into a dictionary", "response": "def read_sources_from_yaml_file(yamlfile):\n \"\"\" Open a yaml file and read all the new sources into a dictionary\n\n Parameters\n ----------\n yaml : file name\n The input yaml file\n\n Returns\n -------\n tab : `~astropy.table.Table`\n \"\"\"\n f = open(yamlfile)\n dd = yaml.load(f)\n srcs = dd['sources']\n f.close()\n roi = ROIModel()\n roi.load_sources(srcs.values())\n return roi.create_table()"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nappend the sources in a table into another table.", "response": "def merge_source_tables(src_tab, tab, all_sources=False, prefix=\"\", suffix=\"\",\n roi_idx=None):\n \"\"\"Append the sources in a 
table into another table.\n\n Parameters\n ----------\n src_tab : `~astropy.table.Table` \n Master source table that will be appended with the sources in\n ``tab``.\n\n tab : `~astropy.table.Table`\n Table to be merged into ``src_tab``. \n\n all_sources : bool\n If true, then all the sources get added to the table. \n if false, then only the sources that start with 'PS' get added\n\n prefix : str\n Prepended to all source names\n\n suffix : str\n Appended to all source names\n\n Returns\n -------\n tab : `~astropy.table.Table`\n\n \"\"\"\n if roi_idx is not None and 'roi' not in tab.columns:\n tab.add_column(Column(name='roi', data=len(tab) * [roi_idx]))\n\n remove_rows = []\n for i, row in enumerate(tab):\n\n if not all_sources and row['name'].find(\"PS\") != 0:\n remove_rows += [i]\n continue\n sname = \"%s%s%s\" % (prefix, row['name'], suffix)\n row['name'] = sname\n\n tab.remove_rows(remove_rows)\n\n if src_tab is None:\n src_tab = tab\n else:\n src_tab = vstack([src_tab, tab], join_type='outer')\n\n return src_tab"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef lightcurve(self, name, **kwargs):\n\n name = self.roi.get_source_by_name(name).name\n\n # Create schema for method configuration\n schema = ConfigSchema(self.defaults['lightcurve'],\n optimizer=self.defaults['optimizer'])\n schema.add_option('prefix', '')\n config = utils.create_dict(self.config['lightcurve'],\n optimizer=self.config['optimizer'])\n config = schema.create_config(config, **kwargs)\n\n self.logger.info('Computing Lightcurve for %s' % name)\n\n o = self._make_lc(name, **config)\n filename = utils.format_filename(self.workdir, 'lightcurve',\n prefix=[config['prefix'],\n name.lower().replace(' ', '_')])\n\n o['file'] = None\n if config['write_fits']:\n o['file'] = os.path.basename(filename) + '.fits'\n self._make_lc_fits(o, filename + '.fits', **config)\n\n if config['write_npy']:\n np.save(filename + '.npy', o)\n\n 
self.logger.info('Finished Lightcurve')\n\n return o", "response": "Generate a lightcurve for the named source."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, costh_edges):\n\n if len(tab_gti) == 0:\n shape = (len(costh_edges) - 1, len(skydir))\n return (np.zeros(shape), np.zeros(shape))\n\n m = (tab_sc['START'] < tab_gti['STOP'][-1])\n m &= (tab_sc['STOP'] > tab_gti['START'][0])\n tab_sc = tab_sc[m]\n\n cos_zmax = np.cos(np.radians(zmax))\n sc_t0 = np.array(tab_sc['START'].data)\n sc_t1 = np.array(tab_sc['STOP'].data)\n sc_live = np.array(tab_sc['LIVETIME'].data)\n sc_lfrac = sc_live / (sc_t1 - sc_t0)\n\n sc_xyz = angle_to_cartesian(np.radians(tab_sc['RA_SCZ'].data),\n np.radians(tab_sc['DEC_SCZ'].data))\n zn_xyz = angle_to_cartesian(np.radians(tab_sc['RA_ZENITH'].data),\n np.radians(tab_sc['DEC_ZENITH'].data))\n\n tab_gti_t0 = np.array(tab_gti['START'].data)\n tab_gti_t1 = np.array(tab_gti['STOP'].data)\n\n # Index of the closest GTI interval\n idx = np.digitize(sc_t0, tab_gti_t0) - 1\n\n # start/stop time of closest GTI interval\n gti_t0 = np.zeros_like(sc_t0)\n gti_t1 = np.zeros_like(sc_t1)\n gti_t0[idx >= 0] = tab_gti_t0[idx[idx >= 0]]\n gti_t1[idx >= 0] = tab_gti_t1[idx[idx >= 0]]\n\n nbin = len(costh_edges) - 1\n\n lt = np.zeros((nbin,) + skydir.shape)\n lt_wt = np.zeros((nbin,) + skydir.shape)\n\n m0 = (idx >= 0) & (sc_t0 >= gti_t0) & (sc_t1 <= gti_t1)\n\n xyz = angle_to_cartesian(skydir.ra.rad, skydir.dec.rad)\n\n for i, t in enumerate(xyz):\n cos_sep = utils.dot_prod(t, sc_xyz)\n cos_zn = utils.dot_prod(t, zn_xyz)\n m = m0 & (cos_zn > cos_zmax) & (cos_sep > 0.0)\n bins = np.digitize(cos_sep[m], bins=costh_edges) - 1\n bins = np.clip(bins, 0, nbin - 1)\n lt[:, i] = np.bincount(bins, weights=sc_live[m], minlength=nbin)\n lt_wt[:, i] = np.bincount(bins, weights=sc_live[m] * sc_lfrac[m],\n minlength=nbin)\n\n return lt, lt_wt", "response": 
"Generate a sequence of livetime distributions at the given skydir."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create(cls, ltfile):\n\n if not re.search('\\.txt?', ltfile) is None:\n files = np.loadtxt(ltfile, unpack=True, dtype='str')\n elif not isinstance(ltfile, list):\n files = glob.glob(ltfile)\n\n ltc = cls.create_from_fits(files[0])\n for f in files[1:]:\n ltc.load_ltfile(f)\n\n return ltc", "response": "Create a livetime cube from a single file or list of files."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_empty(cls, tstart, tstop, fill=0.0, nside=64):\n cth_edges = np.linspace(0, 1.0, 41)\n domega = utils.edge_to_width(cth_edges) * 2.0 * np.pi\n hpx = HPX(nside, True, 'CEL', ebins=cth_edges)\n data = np.ones((len(cth_edges) - 1, hpx.npix)) * fill\n return cls(data, hpx, cth_edges, tstart=tstart, tstop=tstop)", "response": "Create an empty livetime cube."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef get_skydir_lthist(self, skydir, cth_bins):\n ra = skydir.ra.deg\n dec = skydir.dec.deg\n\n npts = 1\n bins = utils.split_bin_edges(cth_bins, npts)\n\n center = edge_to_center(bins)\n width = edge_to_width(bins)\n ipix = hp.ang2pix(self.hpx.nside, np.pi / 2. 
- np.radians(dec),\n np.radians(ra), nest=self.hpx.nest)\n lt = np.histogram(self._cth_center,\n weights=self.data[:, ipix], bins=bins)[0]\n lt = np.sum(lt.reshape(-1, npts), axis=1)\n return lt", "response": "Get the livetime distribution for a given sky - location."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_skydir_ltcube(self, skydir, tab_sc, tab_gti, zmax):\n\n skydir = SkyCoord(np.array([skydir.ra.deg]),\n np.array([skydir.dec.deg]), unit='deg')\n\n lt, lt_wt = fill_livetime_hist(skydir, tab_sc, tab_gti, zmax,\n self.costh_edges)\n\n ipix = self.hpx.skydir_to_pixel(skydir)\n\n lt_scale = np.ones_like(lt)\n lt_wt_scale = np.ones_like(lt_wt)\n m = self.data[:, ipix] > 0.0\n\n lt_scale[m] = lt[m] / self.data[:, ipix][m]\n lt_wt_scale[m] = lt_wt[m] / self._data_wt[:, ipix][m]\n data = self.data * lt_scale\n data_wt = self._data_wt * lt_wt_scale\n return LTCube(data, copy.deepcopy(self.hpx), self.costh_edges,\n # tstart=np.min(tab_gti_t0),\n # tstop=np.max(tab_gti_t1),\n zmax=zmax, data_wt=data_wt)", "response": "This method creates a new livetime cube by scaling this one by the the\n observing profile ratio in the direction skydir."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nwrites the livetime cube to a FITS file.", "response": "def write(self, outfile):\n \"\"\"Write the livetime cube to a FITS file.\"\"\"\n\n hdu_pri = fits.PrimaryHDU()\n\n hdu_exp = self._create_exp_hdu(self.data)\n hdu_exp.name = 'EXPOSURE'\n hdu_exp_wt = self._create_exp_hdu(self._data_wt)\n hdu_exp_wt.name = 'WEIGHTED_EXPOSURE'\n\n cols = [Column(name='CTHETA_MIN', dtype='f4',\n data=self.costh_edges[:-1][::-1]),\n Column(name='CTHETA_MAX', dtype='f4',\n data=self.costh_edges[1:][::-1]), ]\n hdu_bnds = fits.table_to_hdu(Table(cols))\n hdu_bnds.name = 'CTHETABOUNDS'\n\n hdu_gti = fits.table_to_hdu(self._tab_gti)\n hdu_gti.name = 'GTI'\n\n hdus = [hdu_pri, hdu_exp, 
hdu_exp_wt,\n hdu_bnds, hdu_gti]\n\n for hdu in hdus:\n hdu.header['TSTART'] = self.tstart\n hdu.header['TSTOP'] = self.tstop\n\n with fits.HDUList(hdus) as hdulist:\n hdulist.writeto(outfile, clobber=True)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef main():\n usage = \"usage: %(prog)s [options] \"\n description = \"Merge a set of Fermi-LAT files.\"\n\n parser = argparse.ArgumentParser(usage=usage, description=description)\n\n parser.add_argument('-o', '--output', default=None, type=str,\n help='Output file.')\n parser.add_argument('--clobber', default=False, action='store_true',\n help='Overwrite output file.')\n parser.add_argument('--hdu', default=None, type=str,\n help='HDU name.')\n parser.add_argument('--gzip', action='store_true',\n help='Compress output file')\n parser.add_argument('--rm', action='store_true',\n help='Remove input files.')\n parser.add_argument('files', nargs='+', default=None,\n help='List of input files.')\n\n args = parser.parse_args()\n\n hpx_map = merge_utils.stack_energy_planes_hpx(args.files, hdu=args.hdu)\n\n if args.output:\n hpx_map.hpx.write_fits(hpx_map.counts, args.output,\n extname=args.hdu, clobber=args.clobber)\n\n if args.gzip:\n os.system('gzip -9 %s' % args.output)\n\n if args.rm:\n for farg in args.files:\n flist = glob.glob(farg)\n for ffound in flist:\n os.path.unlink(ffound)", "response": "This function is called by the command line interface to merge a set of Fermi - LAT files."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef make_cube_slice(map_in, loge_bounds):\n # FIXME: This functionality should be moved into a slice method of\n # gammapy.maps\n axis = map_in.geom.axes[0]\n i0 = utils.val_to_edge(axis.edges, 10**loge_bounds[0])[0]\n i1 = utils.val_to_edge(axis.edges, 10**loge_bounds[1])[0]\n new_axis = map_in.geom.axes[0].slice(slice(i0, i1))\n geom = map_in.geom.to_image()\n geom = 
geom.to_cube([new_axis])\n map_out = WcsNDMap(geom, map_in.data[slice(i0, i1), ...].copy())\n return map_out", "response": "Extract a slice from a map cube object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef plot_sed(sed, showlnl=False, **kwargs):\n\n ax = kwargs.pop('ax', plt.gca())\n cmap = kwargs.get('cmap', 'BuGn')\n\n annotate_name(sed, ax=ax)\n SEDPlotter.plot_flux_points(sed, **kwargs)\n\n if np.any(sed['ts'] > 9.):\n\n if 'model_flux' in sed:\n SEDPlotter.plot_model(sed['model_flux'],\n noband=showlnl, **kwargs)\n\n if showlnl:\n SEDPlotter.plot_lnlscan(sed, **kwargs)\n\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel('Energy [MeV]')\n ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')", "response": "Render a plot of a spectral energy distribution."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef make_residmap_plots(self, maps, roi=None, **kwargs):\n\n fmt = kwargs.get('format', self.config['format'])\n figsize = kwargs.get('figsize', self.config['figsize'])\n workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])\n use_weights = kwargs.pop('use_weights', False)\n # FIXME, how to set this:\n no_contour = False\n zoom = kwargs.get('zoom', None)\n\n kwargs.setdefault('graticule_radii', self.config['graticule_radii'])\n kwargs.setdefault('label_ts_threshold',\n self.config['label_ts_threshold'])\n cmap = kwargs.setdefault('cmap', self.config['cmap'])\n cmap_resid = kwargs.pop('cmap_resid', self.config['cmap_resid'])\n kwargs.setdefault('catalogs', self.config['catalogs'])\n if no_contour:\n sigma_levels = None\n else:\n sigma_levels = [-5, -3, 3, 5, 7] + list(np.logspace(1, 3, 17))\n\n load_bluered_cmap()\n\n prefix = maps['name']\n mask = maps['mask']\n if use_weights:\n sigma_hist_data = maps['sigma'].data[maps['mask'].data.astype(\n bool)]\n maps['sigma'].data *= maps['mask'].data\n 
maps['data'].data *= maps['mask'].data\n maps['model'].data *= maps['mask'].data\n maps['excess'].data *= maps['mask'].data\n else:\n sigma_hist_data = maps['sigma'].data\n\n fig = plt.figure(figsize=figsize)\n p = ROIPlotter(maps['sigma'], roi=roi, **kwargs)\n p.plot(vmin=-5, vmax=5, levels=sigma_levels,\n cb_label='Significance [$\\sigma$]', interpolation='bicubic',\n cmap=cmap_resid, zoom=zoom)\n plt.savefig(utils.format_filename(workdir,\n 'residmap_sigma',\n prefix=[prefix],\n extension=fmt))\n plt.close(fig)\n\n # make and draw histogram\n fig, ax = plt.subplots(figsize=figsize)\n nBins = np.linspace(-6, 6, 121)\n data = np.nan_to_num(sigma_hist_data)\n # find best fit parameters\n mu, sigma = norm.fit(data.flatten())\n # make and draw the histogram\n data[data > 6.0] = 6.0\n data[data < -6.0] = -6.0\n\n n, bins, patches = ax.hist(data.flatten(), nBins, density=True,\n histtype='stepfilled',\n facecolor='green', alpha=0.75)\n # make and draw best fit line\n y = norm.pdf(bins, mu, sigma)\n ax.plot(bins, y, 'r--', linewidth=2)\n y = norm.pdf(bins, 0.0, 1.0)\n ax.plot(bins, y, 'k', linewidth=1)\n\n # labels and such\n ax.set_xlabel(r'Significance ($\\sigma$)')\n ax.set_ylabel('Probability')\n paramtext = 'Gaussian fit:\\n'\n paramtext += '$\\\\mu=%.2f$\\n' % mu\n paramtext += '$\\\\sigma=%.2f$' % sigma\n ax.text(0.05, 0.95, paramtext, verticalalignment='top',\n horizontalalignment='left', transform=ax.transAxes)\n\n plt.savefig(utils.format_filename(workdir,\n 'residmap_sigma_hist',\n prefix=[prefix],\n extension=fmt))\n plt.close(fig)\n\n vmax = max(np.max(maps['data'].data), np.max(maps['model'].data))\n vmin = min(np.min(maps['data'].data), np.min(maps['model'].data))\n\n fig = plt.figure(figsize=figsize)\n p = ROIPlotter(maps['data'], roi=roi, **kwargs)\n p.plot(cb_label='Counts', interpolation='bicubic',\n cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)\n plt.savefig(utils.format_filename(workdir,\n 'residmap_data',\n prefix=[prefix],\n extension=fmt))\n 
plt.close(fig)\n\n fig = plt.figure(figsize=figsize)\n p = ROIPlotter(maps['model'], roi=roi, **kwargs)\n p.plot(cb_label='Counts', interpolation='bicubic',\n cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)\n plt.savefig(utils.format_filename(workdir,\n 'residmap_model',\n prefix=[prefix],\n extension=fmt))\n plt.close(fig)\n\n fig = plt.figure(figsize=figsize)\n p = ROIPlotter(maps['excess'], roi=roi, **kwargs)\n p.plot(cb_label='Counts', interpolation='bicubic',\n cmap=cmap_resid)\n plt.savefig(utils.format_filename(workdir,\n 'residmap_excess',\n prefix=[prefix],\n extension=fmt))\n plt.close(fig)", "response": "Make plots from the output of a GTAnalysis. ResidualMap."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nmakes plots from the output of `~fermipy.gtanalysis.GTAnalysis.tsmap` or `~fermipy.gtanalysis.GTAnalysis.tscube`. This method generates a 2D sky map for the best-fit test source in sqrt(TS) and Npred. Parameters ---------- maps : dict Output dictionary of `~fermipy.gtanalysis.GTAnalysis.tsmap` or `~fermipy.gtanalysis.GTAnalysis.tscube`. roi : `~fermipy.roi_model.ROIModel` ROI Model object. Generate markers at the positions of the sources in this ROI. zoom : float Crop the image by this factor. If None then no crop is applied.", "response": "def make_tsmap_plots(self, maps, roi=None, **kwargs):\n \"\"\"Make plots from the output of\n `~fermipy.gtanalysis.GTAnalysis.tsmap` or\n `~fermipy.gtanalysis.GTAnalysis.tscube`. This method\n generates a 2D sky map for the best-fit test source in\n sqrt(TS) and Npred.\n\n Parameters\n ----------\n maps : dict\n Output dictionary of\n `~fermipy.gtanalysis.GTAnalysis.tsmap` or\n `~fermipy.gtanalysis.GTAnalysis.tscube`.\n\n roi : `~fermipy.roi_model.ROIModel`\n ROI Model object. Generate markers at the positions of\n the sources in this ROI.\n\n zoom : float\n Crop the image by this factor. 
If None then no crop is\n applied.\n \"\"\"\n kwargs.setdefault('graticule_radii', self.config['graticule_radii'])\n kwargs.setdefault('label_ts_threshold',\n self.config['label_ts_threshold'])\n kwargs.setdefault('cmap', self.config['cmap'])\n kwargs.setdefault('catalogs', self.config['catalogs'])\n fmt = kwargs.get('format', self.config['format'])\n figsize = kwargs.get('figsize', self.config['figsize'])\n workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])\n suffix = kwargs.pop('suffix', 'tsmap')\n zoom = kwargs.pop('zoom', None)\n\n if 'ts' not in maps:\n return\n\n sigma_levels = [3, 5, 7] + list(np.logspace(1, 3, 17))\n prefix = maps['name']\n fig = plt.figure(figsize=figsize)\n p = ROIPlotter(maps['sqrt_ts'], roi=roi, **kwargs)\n p.plot(vmin=0, vmax=5, levels=sigma_levels,\n cb_label='Sqrt(TS) [$\\sigma$]', interpolation='bicubic',\n zoom=zoom)\n plt.savefig(utils.format_filename(workdir,\n '%s_sqrt_ts' % suffix,\n prefix=[prefix],\n extension=fmt))\n plt.close(fig)\n\n fig = plt.figure(figsize=figsize)\n p = ROIPlotter(maps['npred'], roi=roi, **kwargs)\n p.plot(vmin=0, cb_label='NPred [Counts]', interpolation='bicubic',\n zoom=zoom)\n plt.savefig(utils.format_filename(workdir,\n '%s_npred' % suffix,\n prefix=[prefix],\n extension=fmt))\n plt.close(fig)\n\n # make and draw histogram\n fig, ax = plt.subplots(figsize=figsize)\n bins = np.linspace(0, 25, 101)\n\n data = np.nan_to_num(maps['ts'].data.T)\n data[data > 25.0] = 25.0\n data[data < 0.0] = 0.0\n n, bins, patches = ax.hist(data.flatten(), bins, density=True,\n histtype='stepfilled',\n facecolor='green', alpha=0.75)\n # ax.plot(bins,(1-chi2.cdf(x,dof))/2.,**kwargs)\n ax.plot(bins, 0.5 * chi2.pdf(bins, 1.0), color='k',\n label=r\"$\\chi^2_{1} / 2$\")\n ax.set_yscale('log')\n ax.set_ylim(1E-4)\n ax.legend(loc='upper right', frameon=False)\n\n # labels and such\n ax.set_xlabel('TS')\n ax.set_ylabel('Probability')\n plt.savefig(utils.format_filename(workdir,\n '%s_ts_hist' % suffix,\n 
prefix=[prefix],\n extension=fmt))\n plt.close(fig)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef make_roi_plots(self, gta, mcube_tot, **kwargs):\n\n fmt = kwargs.get('format', self.config['format'])\n figsize = kwargs.get('figsize', self.config['figsize'])\n prefix = kwargs.get('prefix', '')\n loge_bounds = kwargs.get('loge_bounds', None)\n weighted = kwargs.get('weighted', False)\n\n roi_kwargs = {}\n roi_kwargs.setdefault('loge_bounds', loge_bounds)\n roi_kwargs.setdefault(\n 'graticule_radii', self.config['graticule_radii'])\n roi_kwargs.setdefault('label_ts_threshold',\n self.config['label_ts_threshold'])\n roi_kwargs.setdefault('cmap', self.config['cmap'])\n roi_kwargs.setdefault('catalogs', self._catalogs)\n\n if loge_bounds is None:\n loge_bounds = (gta.log_energies[0], gta.log_energies[-1])\n esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])\n\n mcube_diffuse = gta.model_counts_map('diffuse')\n counts_map = gta.counts_map()\n\n if weighted:\n wmap = gta.weight_map()\n counts_map = copy.deepcopy(counts_map)\n mcube_tot = copy.deepcopy(mcube_tot)\n counts_map.data *= wmap.data\n mcube_tot.data *= wmap.data\n mcube_diffuse.data *= wmap.data\n\n # colors = ['k', 'b', 'g', 'r']\n data_style = {'marker': 's', 'linestyle': 'None'}\n\n fig = plt.figure(figsize=figsize)\n\n if gta.projtype == \"WCS\":\n xmin = -1\n xmax = 1\n elif gta.projtype == \"HPX\":\n hpx2wcs = counts_map.make_wcs_mapping(proj='CAR', oversample=2)\n counts_map = counts_map.to_wcs(hpx2wcs=hpx2wcs)\n mcube_tot = mcube_tot.to_wcs(hpx2wcs=hpx2wcs)\n mcube_diffuse = mcube_diffuse.to_wcs(hpx2wcs=hpx2wcs)\n xmin = None\n xmax = None\n\n fig = plt.figure(figsize=figsize)\n rp = ROIPlotter(mcube_tot, roi=gta.roi, **roi_kwargs)\n rp.plot(cb_label='Counts', zscale='pow', gamma=1. 
/ 3.)\n plt.savefig(os.path.join(gta.config['fileio']['workdir'],\n '%s_model_map%s.%s' % (\n prefix, esuffix, fmt)))\n plt.close(fig)\n\n rp = ROIPlotter(counts_map, roi=gta.roi, **roi_kwargs)\n\n rp.plot(cb_label='Counts', zscale='sqrt')\n plt.savefig(os.path.join(gta.config['fileio']['workdir'],\n '%s_counts_map%s.%s' % (\n prefix, esuffix, fmt)))\n plt.close(fig)\n\n for iaxis, xlabel, psuffix in zip([0, 1],\n ['LON Offset [deg]', 'LAT Offset [deg]'],\n ['xproj', 'yproj']):\n\n fig = plt.figure(figsize=figsize)\n rp.plot_projection(iaxis, label='Data', color='k',\n xmin=xmin, xmax=xmax, **data_style)\n rp.plot_projection(iaxis, data=mcube_tot, label='Model', xmin=xmin, xmax=xmax,\n noerror=True)\n rp.plot_projection(iaxis, data=mcube_diffuse, label='Diffuse', xmin=xmin, xmax=xmax,\n noerror=True)\n plt.gca().set_ylabel('Counts')\n plt.gca().set_xlabel(xlabel)\n plt.gca().legend(frameon=False)\n annotate(loge_bounds=loge_bounds)\n plt.savefig(os.path.join(gta.config['fileio']['workdir'],\n '%s_counts_map_%s%s.%s' % (prefix, psuffix,\n esuffix, fmt)))\n plt.close(fig)", "response": "Make various diagnostic plots for the 1D and 2D counts and model distributions."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef extract_parameters(pil, keys=None):\n out_dict = {}\n if keys is None:\n keys = pil.keys()\n for key in keys:\n try:\n out_dict[key] = pil[key]\n except ValueError:\n out_dict[key] = None\n return out_dict", "response": "Extract and return parameter names and values from a pil object\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef update_gtapp(gtapp, **kwargs):\n for key, val in kwargs.items():\n if key in ['pfiles', 'scratch']:\n continue\n if val is None:\n continue\n try:\n gtapp[key] = val\n except ValueError:\n raise ValueError(\n \"gtapp failed to set parameter %s %s\" % (key, val))\n except KeyError:\n raise 
KeyError(\"gtapp failed to set parameter %s %s\" % (key, val))", "response": "Update the parameters of the object that can run ScienceTools applications in question"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nset the PFILES env var to the value of the current PFILES envar .", "response": "def _set_pfiles(dry_run, **kwargs):\n \"\"\"Set the PFILES env var\n \n Parameters\n ----------\n\n dry_run : bool\n Don't actually run\n\n Keyword arguments\n -----------------\n\n pfiles : str\n Value to set PFILES\n\n Returns\n -------\n\n pfiles_orig : str\n Current value of PFILES envar\n \"\"\"\n pfiles_orig = os.environ['PFILES']\n pfiles = kwargs.get('pfiles', None)\n if pfiles:\n if dry_run:\n print(\"mkdir %s\" % pfiles)\n else:\n try:\n os.makedirs(pfiles)\n except OSError:\n pass\n pfiles = \"%s:%s\" % (pfiles, pfiles_orig)\n os.environ['PFILES'] = pfiles\n return pfiles_orig"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nbuild an object that can run ScienceTools application in question", "response": "def build_gtapp(appname, dry_run, **kwargs):\n \"\"\"Build an object that can run ScienceTools application\n\n Parameters\n ----------\n appname : str\n Name of the application (e.g., gtbin)\n\n dry_run : bool\n Print command but do not run it\n\n kwargs : arguments used to invoke the application\n\n Returns `GtApp.GtApp` object that will run the application in question\n \"\"\"\n pfiles_orig = _set_pfiles(dry_run, **kwargs)\n gtapp = GtApp.GtApp(appname)\n update_gtapp(gtapp, **kwargs)\n _reset_pfiles(pfiles_orig)\n return gtapp"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef run_gtapp(gtapp, stream, dry_run, **kwargs):\n if stream is None:\n stream = sys.stdout\n\n pfiles_orig = _set_pfiles(dry_run, **kwargs) \n update_gtapp(gtapp, **kwargs)\n\n stream.write(\"%s\\n\" % gtapp.command())\n stream.flush()\n if dry_run:\n _reset_pfiles(pfiles_orig) \n 
return 0\n\n try:\n stdin, stdout = gtapp.runWithOutput(print_command=False)\n for line in stdout:\n stream.write(line.strip())\n stream.flush()\n return_code = 0\n except:\n stream.write('Exited with exit code -1\\n')\n return_code = -1\n\n _reset_pfiles(pfiles_orig)\n return return_code", "response": "Runs one on the ScienceTools apps\n object"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef update_args(self, override_args):\n Link.update_args(self, override_args)\n dry_run = override_args.get('dry_run', False)\n if self.__app is None:\n self.__app = build_gtapp(self.appname, dry_run, **self.args)\n#except:\n# raise ValueError(\"Failed to build link %s %s %s\" %\n# (self.linkname, self.appname, self.args))\n else:\n update_gtapp(self.__app, **self.args)", "response": "Update the argument used to invoke the application\n "} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef run_command(self, stream=sys.stdout, dry_run=False):\n return run_gtapp(self.__app, stream, dry_run, **self.args)", "response": "Runs the command for this link."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef command_template(self):\n com_out = self.appname\n for key, val in self.args.items():\n if key in self._options:\n com_out += ' %s={%s}' % (key, key)\n else:\n com_out += ' %s=%s' % (key, val)\n return com_out", "response": "Build and return a string that can be used as a template invoking\n this chain from the command line."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef run_analysis(self, argv):\n args = self._parser.parse_args(argv)\n components = Component.build_from_yamlfile(args.comp)\n NAME_FACTORY.update_base_dict(args.data)\n model_dict = make_library(**args.__dict__)\n model_manager = model_dict['ModelManager']\n models = 
load_yaml(args.models)\n data = args.data\n hpx_order = args.hpx_order\n for modelkey in models:\n model_manager.make_srcmap_manifest(modelkey, components, data)\n model_manager.make_fermipy_config_yaml(modelkey, components, data,\n hpx_order=hpx_order,\n irf_ver=NAME_FACTORY.irf_ver())", "response": "Build the manifest for all the models\n "} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef copy_ccube(ccube, outsrcmap, hpx_order):\n sys.stdout.write(\" Copying counts cube from %s to %s\\n\" % (ccube, outsrcmap))\n try:\n hdulist_in = fits.open(ccube)\n except IOError:\n hdulist_in = fits.open(\"%s.gz\" % ccube)\n\n hpx_order_in = hdulist_in[1].header['ORDER']\n\n if hpx_order_in > hpx_order:\n hpxmap = HpxMap.create_from_hdulist(hdulist_in)\n hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)\n hpxlist_out = hdulist_in\n #hpxlist_out['SKYMAP'] = hpxmap_out.create_image_hdu()\n hpxlist_out[1] = hpxmap_out.create_image_hdu()\n hpxlist_out[1].name = 'SKYMAP'\n hpxlist_out.writeto(outsrcmap)\n return hpx_order\n else:\n os.system('cp %s %s' % (ccube, outsrcmap))\n #os.system('cp %s.gz %s.gz' % (ccube, outsrcmap))\n #os.system('gunzip -f %s.gz' % (outsrcmap))\n return None", "response": "Copy a counts cube into outsrcmap file with HEALPix order to hpx_order."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef append_hdus(hdulist, srcmap_file, source_names, hpx_order):\n sys.stdout.write(\" Extracting %i sources from %s\" % (len(source_names), srcmap_file))\n try:\n hdulist_in = fits.open(srcmap_file)\n except IOError:\n try:\n hdulist_in = fits.open('%s.gz' % srcmap_file)\n except IOError:\n sys.stdout.write(\" Missing file %s\\n\" % srcmap_file)\n return\n\n for source_name in source_names:\n sys.stdout.write('.')\n sys.stdout.flush()\n if hpx_order is None:\n hdulist.append(hdulist_in[source_name])\n else:\n try:\n hpxmap = 
HpxMap.create_from_hdulist(hdulist_in, hdu=source_name)\n except IndexError:\n print(\" Index error on source %s in file %s\" % (source_name, srcmap_file))\n continue\n except KeyError:\n print(\" Key error on source %s in file %s\" % (source_name, srcmap_file))\n continue\n hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)\n hdulist.append(hpxmap_out.create_image_hdu(name=source_name))\n sys.stdout.write(\"\\n\")\n hdulist.flush()\n hdulist_in.close()", "response": "Append HEALPix maps to a list of HEALPix maps"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nassembling the source map file for one binning component.", "response": "def assemble_component(compname, compinfo, hpx_order):\n \"\"\"Assemble the source map file for one binning component\n\n Parameters\n ----------\n\n compname : str\n The key for this component (e.g., E0_PSF3)\n compinfo : dict\n Information about this component\n hpx_order : int\n Maximum order for maps\n\n \"\"\"\n sys.stdout.write(\"Working on component %s\\n\" % compname)\n ccube = compinfo['ccube']\n outsrcmap = compinfo['outsrcmap']\n source_dict = compinfo['source_dict']\n\n hpx_order = AssembleModel.copy_ccube(ccube, outsrcmap, hpx_order)\n hdulist = AssembleModel.open_outsrcmap(outsrcmap)\n\n for comp_name in sorted(source_dict.keys()):\n source_info = source_dict[comp_name]\n source_names = source_info['source_names']\n srcmap_file = source_info['srcmap_file']\n AssembleModel.append_hdus(hdulist, srcmap_file,\n source_names, hpx_order)\n sys.stdout.write(\"Done!\\n\")"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef run_analysis(self, argv):\n args = self._parser.parse_args(argv)\n manifest = yaml.safe_load(open(args.input))\n\n compname = args.compname\n value = manifest[compname]\n self.assemble_component(compname, value, args.hpx_order)", "response": "Assemble the source map file for one binning component FIXME - use 
this method instead of the base class"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef build_job_configs(self, args):\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n NAME_FACTORY.update_base_dict(args['data'])\n\n models = load_yaml(args['models'])\n\n for modelkey in models:\n manifest = os.path.join('analysis', 'model_%s' % modelkey,\n 'srcmap_manifest_%s.yaml' % modelkey)\n for comp in components:\n key = comp.make_key('{ebin_name}_{evtype_name}')\n fullkey = \"%s_%s\" % (modelkey, key)\n outfile = NAME_FACTORY.merged_srcmaps(modelkey=modelkey,\n component=key,\n coordsys=comp.coordsys,\n mktime='none',\n irf_ver=NAME_FACTORY.irf_ver())\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[fullkey] = dict(input=manifest,\n compname=key,\n logfile=logfile)\n return job_configs", "response": "Hook to build job configurations from the input data and the output files of the components."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _map_arguments(self, input_dict):\n data = input_dict.get('data')\n comp = input_dict.get('comp')\n library = input_dict.get('library')\n models = input_dict.get('models')\n hpx_order = input_dict.get('hpx_order_fitting')\n dry_run = input_dict.get('dry_run', False)\n\n self._set_link('init-model', InitModel,\n comp=comp, data=data,\n library=library,\n models=models,\n hpx_order=hpx_order,\n dry_run=dry_run)\n\n self._set_link('assemble-model', AssembleModel_SG,\n comp=comp, data=data,\n models=models)", "response": "Map from the top - level arguments to the arguments provided to\n the indiviudal links"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nhook to build job configurations based on the arguments passed in.", "response": "def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n ttype 
= args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if sim is not None:\n raise ValueError(\"Found 'sim' argument on AnalyzeROI_SG config.\")\n if targets_yaml is None:\n return job_configs\n\n config_yaml = 'config.yaml'\n config_override = args.get('config')\n if is_not_null(config_override):\n config_yaml = config_override\n\n targets = load_yaml(targets_yaml)\n base_config = dict(roi_baseline=args['roi_baseline'],\n make_plots=args['make_plots'])\n\n for target_name in targets.keys():\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n fullpath=True)\n target_dir = NAME_FACTORY.targetdir(**name_keys)\n config_path = os.path.join(target_dir, config_yaml)\n logfile = make_nfs_path(os.path.join(\n target_dir, \"%s_%s.log\" % (self.linkname, target_name)))\n job_config = base_config.copy() \n job_config.update(dict(config=config_path,\n logfile=logfile))\n job_configs[target_name] = job_config\n\n return job_configs"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreceives data from the device.", "response": "def read_data(self, size):\n \"\"\"Receive data from the device.\n\n If the read fails for any reason, an :obj:`IOError` exception\n is raised.\n\n :param size: the number of bytes to read.\n\n :type size: int\n\n :return: the data received.\n\n :rtype: list(int)\n\n \"\"\"\n result = list()\n data = ctypes.create_string_buffer(8)\n while size > 0:\n length = min(size, 8)\n n = hidapi.hid_read_timeout(self.device, data, length, 100)\n if n <= 0:\n raise IOError(\n 'pywws.device_ctypes_hidapi.USBDevice.read_data failed')\n for i in range(n):\n result.append(ord(data[i]))\n size -= n\n return result"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef write_data(self, buf):\n data = ''.join(map(chr, buf))\n size = len(data)\n if hidapi.hid_write(self.device, ctypes.c_char_p(data), size) != size:\n raise IOError(\n 
'pywws.device_ctypes_hidapi.USBDevice.write_data failed')\n return True", "response": "Send data to the device."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef zambretti_code(params, hourly_data):\n north = literal_eval(params.get('Zambretti', 'north', 'True'))\n baro_upper = float(params.get('Zambretti', 'baro upper', '1050.0'))\n baro_lower = float(params.get('Zambretti', 'baro lower', '950.0'))\n if not hourly_data['rel_pressure']:\n return ''\n if hourly_data['wind_ave'] is None or hourly_data['wind_ave'] < 0.3:\n wind = None\n else:\n wind = hourly_data['wind_dir']\n if hourly_data['pressure_trend'] is None:\n trend = 0.0\n else:\n trend = hourly_data['pressure_trend'] / 3.0\n # normalise pressure\n pressure = 950.0 + (\n (1050.0 - 950.0) * (hourly_data['rel_pressure'] - baro_lower) /\n (baro_upper - baro_lower))\n # adjust pressure for wind direction\n if wind is not None:\n if not isinstance(wind, int):\n wind = int(wind + 0.5) % 16\n if not north:\n # southern hemisphere, so add 180 degrees\n wind = (wind + 8) % 16\n pressure += ( 5.2, 4.2, 3.2, 1.05, -1.1, -3.15, -5.2, -8.35,\n -11.5, -9.4, -7.3, -5.25, -3.2, -1.15, 0.9, 3.05)[wind]\n # compute base forecast from pressure and trend (hPa / hour)\n summer = north == (hourly_data['idx'].month >= 4 and\n hourly_data['idx'].month <= 9)\n if trend >= 0.1:\n # rising pressure\n if summer:\n pressure += 3.2\n F = 0.1740 * (1031.40 - pressure)\n LUT = ('A', 'B', 'B', 'C', 'F', 'G', 'I', 'J', 'L', 'M', 'M', 'Q', 'T',\n 'Y')\n elif trend <= -0.1:\n # falling pressure\n if summer:\n pressure -= 3.2\n F = 0.1553 * (1029.95 - pressure)\n LUT = ('B', 'D', 'H', 'O', 'R', 'U', 'V', 'X', 'X', 'Z')\n else:\n # steady\n F = 0.2314 * (1030.81 - pressure)\n LUT = ('A', 'B', 'B', 'B', 'E', 'K', 'N', 'N', 'P', 'P', 'S', 'W', 'W',\n 'X', 'X', 'X', 'Z')\n # clip to range of lookup table\n F = min(max(int(F + 0.5), 0), len(LUT) - 1)\n # convert to letter code\n 
return LUT[F]", "response": "Simple implementation of Zambretti forecaster algorithm."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef read_block(self, address):\n buf = [\n self.ReadCommand,\n address // 256,\n address % 256,\n self.EndMark,\n self.ReadCommand,\n address // 256,\n address % 256,\n self.EndMark,\n ]\n if not self.dev.write_data(buf):\n return None\n return self.dev.read_data(32)", "response": "Read 32 bytes from the weather station."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nwrite a single byte to the weather station.", "response": "def write_byte(self, address, data):\n \"\"\"Write a single byte to the weather station.\n\n :param address: address to write to.\n\n :type address: int\n\n :param data: the value to write.\n\n :type data: int\n\n :return: success status.\n\n :rtype: bool\n\n \"\"\"\n buf = [\n self.WriteCommandWord,\n address // 256,\n address % 256,\n self.EndMark,\n self.WriteCommandWord,\n data,\n 0,\n self.EndMark,\n ]\n if not self.dev.write_data(buf):\n return False\n buf = self.dev.read_data(8)\n if buf is None:\n return False\n for byte in buf:\n if byte != 0xA5:\n return False\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ngets next circular buffer data pointer.", "response": "def inc_ptr(self, ptr):\n \"\"\"Get next circular buffer data pointer.\"\"\"\n result = ptr + self.reading_len[self.ws_type]\n if result >= 0x10000:\n result = self.data_start\n return result"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef dec_ptr(self, ptr):\n result = ptr - self.reading_len[self.ws_type]\n if result < self.data_start:\n result = 0x10000 - self.reading_len[self.ws_type]\n return result", "response": "Get previous circular buffer data pointer."} {"SOURCE": "codesearchnet", "instruction": "Here you have a 
function in Python 3, explain what it does\ndef get_raw_data(self, ptr, unbuffered=False):\n if unbuffered:\n self._data_pos = None\n # round down ptr to a 'block boundary'\n idx = ptr - (ptr % 0x20)\n ptr -= idx\n count = self.reading_len[self.ws_type]\n if self._data_pos == idx:\n # cache contains useful data\n result = self._data_block[ptr:ptr + count]\n if len(result) >= count:\n return result\n else:\n result = list()\n if ptr + count > 0x20:\n # need part of next block, which may be in cache\n if self._data_pos != idx + 0x20:\n self._data_pos = idx + 0x20\n self._data_block = self._read_block(self._data_pos)\n result += self._data_block[0:ptr + count - 0x20]\n if len(result) >= count:\n return result\n # read current block\n self._data_pos = idx\n self._data_block = self._read_block(self._data_pos)\n result = self._data_block[ptr:ptr + count] + result\n return result", "response": "Get raw data from circular buffer."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_data(self, ptr, unbuffered=False):\n result = _decode(self.get_raw_data(ptr, unbuffered),\n self._reading_format[self.ws_type])\n return result", "response": "Get decoded data from circular buffer."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef current_pos(self):\n new_ptr = _decode(\n self._read_fixed_block(0x0020), self.lo_fix_format['current_pos'])\n if new_ptr == self._current_ptr:\n return self._current_ptr\n if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr):\n logger.error(\n 'unexpected ptr change %06x -> %06x', self._current_ptr, new_ptr)\n self._current_ptr = new_ptr\n return self._current_ptr", "response": "Get the current circular buffer location where current data is being written."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_raw_fixed_block(self, unbuffered=False):\n 
if unbuffered or not self._fixed_block:\n self._fixed_block = self._read_fixed_block()\n return self._fixed_block", "response": "Get the raw fixed block of settings and min / max data."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets the decoded fixed block of settings and min / max data.", "response": "def get_fixed_block(self, keys=[], unbuffered=False):\n \"\"\"Get the decoded \"fixed block\" of settings and min/max data.\n\n A subset of the entire block can be selected by keys.\"\"\"\n if unbuffered or not self._fixed_block:\n self._fixed_block = self._read_fixed_block()\n format = self.fixed_format\n # navigate down list of keys to get to wanted data\n for key in keys:\n format = format[key]\n return _decode(self._fixed_block, format)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef write_data(self, data):\n # send data\n for ptr, value in data:\n self._write_byte(ptr, value)\n # set 'data changed'\n self._write_byte(self.fixed_format['data_changed'][0], 0xAA)\n # wait for station to clear 'data changed'\n while True:\n ack = _decode(\n self._read_fixed_block(0x0020), self.fixed_format['data_changed'])\n if ack == 0:\n break\n logger.debug('write_data waiting for ack')\n time.sleep(6)", "response": "Write a set of single bytes to the weather station."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _find_device(self, idVendor, idProduct):\n for bus in usb.busses():\n for device in bus.devices:\n if (device.idVendor == idVendor and\n device.idProduct == idProduct):\n return device\n return None", "response": "Find a USB device by vendor and product id."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreceives data from the device.", "response": "def read_data(self, size):\n \"\"\"Receive data from the device.\n\n If the read fails for 
any reason, an :obj:`IOError` exception\n is raised.\n\n :param size: the number of bytes to read.\n\n :type size: int\n\n :return: the data received.\n\n :rtype: list(int)\n\n \"\"\"\n result = self.devh.interruptRead(0x81, size, 1200)\n if result is None or len(result) < size:\n raise IOError('pywws.device_libusb.USBDevice.read_data failed')\n return list(result)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nsend data to the device.", "response": "def write_data(self, buf):\n \"\"\"Send data to the device.\n\n If the write fails for any reason, an :obj:`IOError` exception\n is raised.\n\n :param buf: the data to send.\n\n :type buf: list(int)\n\n :return: success status.\n\n :rtype: bool\n\n \"\"\"\n result = self.devh.controlMsg(\n usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,\n usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)\n if result != len(buf):\n raise IOError('pywws.device_libusb.USBDevice.write_data failed')\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _adapt_WSDateTime(dt):\n try:\n ts = int(\n (dt.replace(tzinfo=pytz.utc)\n - datetime(1970,1,1,tzinfo=pytz.utc)\n ).total_seconds()\n )\n except (OverflowError,OSError):\n if dt < datetime.now():\n ts = 0\n else:\n ts = 2**63-1\n return ts", "response": "Adapt the datetime like input into a unix timestamp."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning the predicate portion of the SQL query string that is used to select the items from the database.", "response": "def _predicate(self, i):\n \"\"\"Given a valid datetime or slace, return the predicate portion\n of the SQL query, a boolean indicating whether multiple items are\n expected from the result, and a dictionary of parameters for the query\n \"\"\"\n if isinstance(i, slice):\n if i.step is not None:\n raise TypeError(\"Slice step not permitted\")\n if ( (i.start is not 
None and not isinstance(i.start, datetime))\n or (i.stop is not None and not isinstance(i.stop, datetime))\n ):\n raise TypeError(\n \"Slice indices must be {} or None\".format(datetime)\n )\n if i.start is not None and i.stop is not None:\n if i.start > i.stop:\n raise ValueError(\n \"Start index is greater than the End index\"\n )\n else:\n # Substitution of the key coloumn, but the\n # parameters themselves will be substituted by sqlite3\n predicate = \"WHERE {} BETWEEN :start AND :stop\".format(\n self._keycol\n )\n elif i.start is not None:\n # i.stop will also be None\n predicate = \"WHERE {} >= :start\".format(self._keycol)\n elif i.stop is not None:\n # i.start will also be None\n predicate = \"WHERE {} <= :stop\".format(self._keycol)\n else:\n # both are None, so equivelent to wanting everything\n predicate = \"\"\n multi = True\n pred = {\"start\": i.start, \"stop\": i.stop}\n elif isinstance(i, datetime):\n # Substitution of the key coloumn, but the\n # parameters themselves will be substituted by sqlite3\n predicate = \"WHERE {} = :key\".format(self._keycol)\n multi = False\n pred = {\"key\": i}\n else:\n # not a slice or a datetime object\n raise TypeError(\"List indices must be {}\".format(datetime))\n # predicate is the end of the query string.\n # multi is a boolean indicating whether the result should be iterable\n # or not. 
pred is a dict of the parameters for substitution\n return (predicate, multi, pred)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef update(self, i):\n key_list = self.key_list\n keynone = {key:None for key in key_list}\n # Generator which fills in missing data from the original iterator\n def datagen(i):\n for datum in i:\n tmp = keynone.copy()\n tmp.update(datum)\n yield tmp\n with self._connection as con:\n con.executemany(\n \"\"\"INSERT OR REPLACE INTO {table} ({keylist})\n VALUES (:{vallist});\n \"\"\".format(table=self.table,\n keylist=\", \".join(self.key_list),\n vallist=\", :\".join(self.key_list)\n ), datagen(i))", "response": "Update the dictionary D with the contents of i."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef before(self, i):\n if not isinstance(i, datetime):\n raise TypeError(\"'{}' is not a datetime object\".format(i))\n else:\n result = self._connection.execute(\n \"\"\"SELECT {selkeycol} FROM {table} WHERE\n {keycol} < :key ORDER BY {keycol} DESC LIMIT 1;\n \"\"\".format(\n selkeycol=self.selkeycol,\n table=self.table,\n keycol=self._keycol\n ), {\"key\":i}).fetchone()\n return result[self._keycol] if result is not None else None", "response": "Return datetime of newest existing data record whose datetime\n is < idx."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nclearing all items from the database.", "response": "def clear(self):\n \"\"\"S.clear() -> None -- remove all items from S\"\"\"\n with self._connection as con:\n con.execute(\"DELETE FROM {};\".format(self.table))"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef set_locale(lang):\n # get the default locale\n lc, encoding = locale.getdefaultlocale()\n try:\n if '.' 
in lang:\n locale.setlocale(locale.LC_ALL, lang)\n else:\n locale.setlocale(locale.LC_ALL, (lang, encoding))\n except locale.Error:\n return False\n return True", "response": "Set the locale used by a language code."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsets the translation used by pywws modules.", "response": "def set_translation(lang):\n \"\"\"Set the translation used by (some) pywws modules.\n\n This sets the translation object ``pywws.localisation.translation``\n to use a particular language.\n\n The ``lang`` parameter can be any string of the form ``en``,\n ``en_GB`` or ``en_GB.UTF-8``. Anything after a ``.`` character is\n ignored. In the case of a string such as ``en_GB``, the routine\n will search for an ``en_GB`` language file before searching for an\n ``en`` one.\n\n :param lang: language code.\n :type lang: string\n :return: success status.\n :rtype: bool\n\n \"\"\"\n global translation\n # make list of possible languages, in order of preference\n langs = list()\n if lang:\n if '.' 
in lang:\n lang = lang.split('.')[0]\n langs += [lang, lang[:2]]\n # get translation object\n path = pkg_resources.resource_filename('pywws', 'lang')\n codeset = locale.getpreferredencoding()\n if codeset == 'ASCII':\n codeset = 'UTF-8'\n try:\n translation = gettext.translation(\n 'pywws', path, languages=langs, codeset=codeset)\n # Python 3 translations don't have a ugettext method\n if not hasattr(translation, 'ugettext'):\n translation.ugettext = translation.gettext\n except IOError:\n return False\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsets the locale and translation for a pywws program.", "response": "def set_application_language(params):\n \"\"\"Set the locale and translation for a pywws program.\n\n This function reads the language from the configuration file, then\n calls :func:`set_locale` and :func:`set_translation`.\n\n :param params: a :class:`pywws.storage.params` object.\n\n :type params: object\n\n \"\"\"\n lang = params.get('config', 'language', None)\n if lang:\n set_locale(lang)\n set_translation(lang)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef read_data(self, size):\n result = list()\n while size > 0:\n count = min(size, 8)\n buf = self.hid.read(count)\n if len(buf) < count:\n raise IOError(\n 'pywws.device_cython_hidapi.USBDevice.read_data failed')\n result += buf\n size -= count\n return result", "response": "Receive data from the device."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nsends data to the device.", "response": "def write_data(self, buf):\n \"\"\"Send data to the device.\n\n :param buf: the data to send.\n\n :type buf: list(int)\n\n :return: success status.\n\n :rtype: bool\n\n \"\"\"\n if self.hid.write(buf) != len(buf):\n raise IOError(\n 'pywws.device_cython_hidapi.USBDevice.write_data failed')\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you create a 
Python 3 function that\nconverts any timestamp to local time.", "response": "def to_local(self, dt):\n \"\"\"Convert any timestamp to local time (with tzinfo).\"\"\"\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=self.utc)\n return dt.astimezone(self.local)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nconverting any timestamp to UTC ( with tzinfo.", "response": "def to_utc(self, dt):\n \"\"\"Convert any timestamp to UTC (with tzinfo).\"\"\"\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self.utc)\n return dt.astimezone(self.utc)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef to_naive(self, dt):\n if dt.tzinfo is None:\n return dt\n return dt.astimezone(self.utc).replace(tzinfo=None)", "response": "Convert any timestamp to pywws ( utc no tzinfo."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef local_replace(self, dt, use_dst=True, _recurse=False, **kwds):\n local_time = dt + self.standard_offset\n if use_dst:\n dst_offset = self.dst(local_time)\n if dst_offset:\n local_time += dst_offset\n adjusted_time = local_time.replace(**kwds)\n if adjusted_time > local_time and not _recurse:\n return self.local_replace(\n dt - DAY, use_dst=use_dst, _recurse=True, **kwds)\n adjusted_time -= dst_offset\n if self.dst(adjusted_time):\n return adjusted_time - self.standard_offset\n adjusted_time = local_time.replace(**kwds)\n if use_dst:\n dst_offset = self.dst(adjusted_time)\n adjusted_time -= dst_offset\n if adjusted_time > local_time and not _recurse:\n return self.local_replace(\n dt - DAY, use_dst=use_dst, _recurse=True, **kwds)\n return adjusted_time - self.standard_offset", "response": "Return the pywws timestamp for the most recent\nRecognizer local time before the datetime dt with datetime replace\n applied."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef 
get(self, section, option, default=None):\n with self._lock:\n if not self._config.has_option(section, option):\n if default is not None:\n self._set(section, option, default)\n return default\n return self._config.get(section, option)", "response": "Get a parameter value and return a string."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef set(self, section, option, value):\n with self._lock:\n self._set(section, option, value)", "response": "Set option in section to string value."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef unset(self, section, option):\n with self._lock:\n if not self._config.has_section(section):\n return\n if self._config.has_option(section, option):\n self._config.remove_option(section, option)\n self._dirty = True\n if not self._config.options(section):\n self._config.remove_section(section)\n self._dirty = True", "response": "Removes an option from a section."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef check_params(self, *keys):\n\n for key in keys:\n if not self.params[key]:\n raise RuntimeError('\"{}\" not set in weather.ini'.format(key))", "response": "Ensure user has set required values in weather. 
ini."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef monitor(i):\n count = 0\n for x in i:\n count+=1\n if count % 10000 == 0:\n logger.info(\"%d records so far, current record is %s\",\n count, x[\"idx\"])\n yield x", "response": "Given an iterator yields data from it\n but prints progress every 10 000 records"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef calibrate_data(params, raw_data, calib_data):\n start = calib_data.before(datetime.max)\n if start is None:\n start = datetime.min\n start = raw_data.after(start + SECOND)\n if start is None:\n return start\n del calib_data[start:]\n calibrator = Calib(params, raw_data)\n def calibgen(inputdata):\n \"\"\"Internal generator function\"\"\"\n count = 0\n for data in inputdata:\n idx = data['idx']\n count += 1\n if count % 10000 == 0:\n logger.info(\"calib: %s\", idx.isoformat(' '))\n elif count % 500 == 0:\n logger.debug(\"calib: %s\", idx.isoformat(' '))\n for key in ('rain', 'abs_pressure', 'temp_in'):\n if data[key] is None:\n logger.error('Ignoring invalid data at %s', idx.isoformat(' '))\n break\n else:\n yield calibrator.calib(data)\n calib_data.update(calibgen(raw_data[start:]))\n return start", "response": "Calibrate raw data using a user - supplied function."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef generate_hourly(calib_data, hourly_data, process_from):\n start = hourly_data.before(datetime.max)\n if start is None:\n start = datetime.min\n start = calib_data.after(start + SECOND)\n if process_from:\n if start:\n start = min(start, process_from)\n else:\n start = process_from\n if start is None:\n return start\n # set start of hour in local time (not all time offsets are integer hours)\n start += timezone.standard_offset\n start = start.replace(minute=0, second=0)\n start -= timezone.standard_offset\n del 
hourly_data[start:]\n # preload pressure history, and find last valid rain\n prev = None\n pressure_history = deque()\n last_rain = None\n for data in calib_data[start - HOURx3:start]:\n if data['rel_pressure']:\n pressure_history.append((data['idx'], data['rel_pressure']))\n if data['rain'] is not None:\n last_rain = data['rain']\n prev = data\n # iterate over data in one hour chunks\n stop = calib_data.before(datetime.max)\n acc = HourAcc(last_rain)\n def hourlygen(inputdata, prev):\n \"\"\"Internal generator function\"\"\"\n hour_start = start\n count = 0\n while hour_start <= stop:\n count += 1\n if count % 1008 == 0:\n logger.info(\"hourly: %s\", hour_start.isoformat(' '))\n elif count % 24 == 0:\n logger.debug(\"hourly: %s\", hour_start.isoformat(' '))\n hour_end = hour_start + HOUR\n acc.reset()\n for data in inputdata[hour_start:hour_end]:\n if data['rel_pressure']:\n pressure_history.append((data['idx'], data['rel_pressure']))\n if prev:\n err = data['idx'] - prev['idx']\n if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR:\n logger.info('unexpected data interval %s %s',\n data['idx'].isoformat(' '), str(err))\n acc.add_raw(data)\n prev = data\n new_data = acc.result()\n if new_data and (new_data['idx'] - hour_start) >= timedelta(minutes=9):\n # compute pressure trend\n new_data['pressure_trend'] = None\n if new_data['rel_pressure']:\n target = new_data['idx'] - HOURx3\n while (len(pressure_history) >= 2 and\n abs(pressure_history[0][0] - target) >\n abs(pressure_history[1][0] - target)):\n pressure_history.popleft()\n if (pressure_history and\n abs(pressure_history[0][0] - target) < HOUR):\n new_data['pressure_trend'] = (\n new_data['rel_pressure'] - pressure_history[0][1])\n # store new hourly data\n yield new_data\n hour_start = hour_end\n hourly_data.update(hourlygen(calib_data, prev))\n return start", "response": "Generate hourly summaries from calibrated data."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ngenerate 
daily summaries from calibrated and hourly data.", "response": "def generate_daily(day_end_hour, use_dst,\n calib_data, hourly_data, daily_data, process_from):\n \"\"\"Generate daily summaries from calibrated and hourly data.\"\"\"\n start = daily_data.before(datetime.max)\n if start is None:\n start = datetime.min\n start = calib_data.after(start + SECOND)\n if process_from:\n if start:\n start = min(start, process_from)\n else:\n start = process_from\n if start is None:\n return start\n # round to start of this day, in local time\n start = timezone.local_replace(\n start, use_dst=use_dst, hour=day_end_hour, minute=0, second=0)\n del daily_data[start:]\n stop = calib_data.before(datetime.max)\n acc = DayAcc()\n def dailygen(inputdata):\n \"\"\"Internal generator function\"\"\"\n day_start = start\n count = 0\n while day_start <= stop:\n count += 1\n if count % 30 == 0:\n logger.info(\"daily: %s\", day_start.isoformat(' '))\n else:\n logger.debug(\"daily: %s\", day_start.isoformat(' '))\n day_end = day_start + DAY\n if use_dst:\n # day might be 23 or 25 hours long\n day_end = timezone.local_replace(\n day_end + HOURx3, use_dst=use_dst, hour=day_end_hour)\n acc.reset()\n for data in inputdata[day_start:day_end]:\n acc.add_raw(data)\n for data in hourly_data[day_start:day_end]:\n acc.add_hourly(data)\n new_data = acc.result()\n if new_data:\n new_data['start'] = day_start\n yield new_data\n day_start = day_end\n daily_data.update(dailygen(calib_data))\n return start"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef generate_monthly(rain_day_threshold, day_end_hour, use_dst,\n daily_data, monthly_data, process_from):\n \"\"\"Generate monthly summaries from daily data.\"\"\"\n start = monthly_data.before(datetime.max)\n if start is None:\n start = datetime.min\n start = daily_data.after(start + SECOND)\n if process_from:\n if start:\n start = min(start, process_from)\n else:\n start = process_from\n if start 
is None:\n return start\n # set start to start of first day of month (local time)\n start = timezone.local_replace(\n start, use_dst=use_dst, day=1, hour=day_end_hour, minute=0, second=0)\n if day_end_hour >= 12:\n # month actually starts on the last day of previous month\n start -= DAY\n del monthly_data[start:]\n stop = daily_data.before(datetime.max)\n if stop is None:\n return None\n acc = MonthAcc(rain_day_threshold)\n def monthlygen(inputdata):\n \"\"\"Internal generator function\"\"\"\n month_start = start\n count = 0\n while month_start <= stop:\n count += 1\n if count % 12 == 0:\n logger.info(\"monthly: %s\", month_start.isoformat(' '))\n else:\n logger.debug(\"monthly: %s\", month_start.isoformat(' '))\n month_end = month_start + WEEK\n if month_end.month < 12:\n month_end = month_end.replace(month=month_end.month+1)\n else:\n month_end = month_end.replace(month=1, year=month_end.year+1)\n month_end = month_end - WEEK\n if use_dst:\n # month might straddle summer time start or end\n month_end = timezone.local_replace(\n month_end + HOURx3, use_dst=use_dst, hour=day_end_hour)\n acc.reset()\n for data in inputdata[month_start:month_end]:\n acc.add_daily(data)\n new_data = acc.result()\n if new_data:\n new_data['start'] = month_start\n yield new_data\n month_start = month_end\n monthly_data.update(monthlygen(daily_data))\n return start", "response": "Generate monthly summaries from daily data."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef process_data(context):\n logger.info('Generating summary data')\n # get time of last record\n last_raw = context.raw_data.before(datetime.max)\n if last_raw is None:\n raise IOError('No data found. 
Check data directory parameter.')\n # get daytime end hour (in local time)\n day_end_hour, use_dst = get_day_end_hour(context.params)\n # get other config\n rain_day_threshold = float(\n context.params.get('config', 'rain day threshold', '0.2'))\n # calibrate raw data\n start = calibrate_data(context.params, context.raw_data, context.calib_data)\n # generate hourly data\n start = generate_hourly(context.calib_data, context.hourly_data, start)\n # generate daily data\n start = generate_daily(day_end_hour, use_dst,\n context.calib_data, context.hourly_data, context.daily_data, start)\n # generate monthly data\n generate_monthly(rain_day_threshold, day_end_hour, use_dst,\n context.daily_data, context.monthly_data, start)\n return 0", "response": "Generate summaries from raw weather station data."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nreceiving data from the device.", "response": "def read_data(self, size):\n \"\"\"Receive data from the device.\n\n If the read fails for any reason, an :obj:`IOError` exception\n is raised.\n\n :param size: the number of bytes to read.\n\n :type size: int\n\n :return: the data received.\n\n :rtype: list(int)\n\n \"\"\"\n result = self.dev.bulkRead(0x81, size, timeout=1200)\n if not result or len(result) < size:\n raise IOError('pywws.device_libusb1.USBDevice.read_data failed')\n # Python2 libusb1 version 1.5 and earlier returns a string\n if not isinstance(result[0], int):\n result = map(ord, result)\n return list(result)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nsend data to the device.", "response": "def write_data(self, buf):\n \"\"\"Send data to the device.\n\n If the write fails for any reason, an :obj:`IOError` exception\n is raised.\n\n :param buf: the data to send.\n\n :type buf: list(int)\n\n :return: success status.\n\n :rtype: bool\n\n \"\"\"\n if sys.version_info[0] < 3:\n str_buf = ''.join(map(chr, buf))\n else:\n str_buf = bytes(buf)\n result = 
self.dev.controlWrite(\n libusb1.LIBUSB_ENDPOINT_OUT | libusb1.LIBUSB_TYPE_CLASS |\n libusb1.LIBUSB_RECIPIENT_INTERFACE,\n libusb1.LIBUSB_REQUEST_SET_CONFIGURATION,\n 0x200, 0, str_buf, timeout=50)\n if result != len(buf):\n raise IOError('pywws.device_libusb1.USBDevice.write_data failed')\n return True"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef before(self, idx):\n if not isinstance(idx, datetime):\n raise TypeError(\"'%s' is not %s\" % (idx, datetime))\n day = min(idx.date(), self._hi_limit - DAY)\n while day >= self._lo_limit:\n if day < self._rd_cache.lo or day >= self._rd_cache.hi:\n self._load(self._rd_cache, day)\n self._rd_cache.set_ptr(idx)\n if self._rd_cache.ptr > 0:\n return self._rd_cache.data[self._rd_cache.ptr - 1]['idx']\n day = self._rd_cache.lo - DAY\n return None", "response": "Return datetime of newest existing data record whose\n datetime is < idx."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef after(self, idx):\n if not isinstance(idx, datetime):\n raise TypeError(\"'%s' is not %s\" % (idx, datetime))\n day = max(idx.date(), self._lo_limit)\n while day < self._hi_limit:\n if day < self._rd_cache.lo or day >= self._rd_cache.hi:\n self._load(self._rd_cache, day)\n self._rd_cache.set_ptr(idx)\n if self._rd_cache.ptr < len(self._rd_cache.data):\n return self._rd_cache.data[self._rd_cache.ptr]['idx']\n day = self._rd_cache.hi\n return None", "response": "Return datetime of oldest existing data record whose\n datetime is < idx."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef nearest(self, idx):\n hi = self.after(idx)\n lo = self.before(idx)\n if hi is None:\n return lo\n if lo is None:\n return hi\n if abs(hi - idx) < abs(lo - idx):\n return hi\n return lo", "response": "Return datetime of record whose datetime is nearest idx."} {"SOURCE": 
"codesearchnet", "instruction": "Write a Python 3 script to\nclear all data from the data store permanently", "response": "def clear(self):\n \"\"\"Clears all data from the data store permanently\"\"\"\n for root, dirs, files in os.walk(self._root_dir, topdown=False):\n for file in files:\n os.unlink(os.path.join(root, file))\n os.rmdir(root)\n # Get the root dir back and re-initialise to start again\n root_dir = os.path.abspath(\n os.path.join(self._root_dir, os.pardir))\n self.__init__(root_dir)"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreceives data from the device.", "response": "def read_data(self, size):\n \"\"\"Receive data from the device.\n\n If the read fails for any reason, an :obj:`IOError` exception\n is raised.\n\n :param size: the number of bytes to read.\n\n :type size: int\n\n :return: the data received.\n\n :rtype: list(int)\n\n \"\"\"\n result = self.dev.read(0x81, size, timeout=1200)\n if not result or len(result) < size:\n raise IOError('pywws.device_pyusb1.USBDevice.read_data failed')\n return list(result)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nsending data to the device.", "response": "def write_data(self, buf):\n \"\"\"Send data to the device.\n\n If the write fails for any reason, an :obj:`IOError` exception\n is raised.\n\n :param buf: the data to send.\n\n :type buf: list(int)\n\n :return: success status.\n\n :rtype: bool\n\n \"\"\"\n bmRequestType = usb.util.build_request_type(\n usb.util.ENDPOINT_OUT,\n usb.util.CTRL_TYPE_CLASS,\n usb.util.CTRL_RECIPIENT_INTERFACE\n )\n result = self.dev.ctrl_transfer(\n bmRequestType=bmRequestType,\n bRequest=usb.REQ_SET_CONFIGURATION,\n data_or_wLength=buf,\n wValue=0x200,\n timeout=50)\n if result != len(buf):\n raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')\n return True"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef 
pressure_trend_text(trend):\n _ = pywws.localisation.translation.ugettext\n if trend > 6.0:\n return _(u'rising very rapidly')\n elif trend > 3.5:\n return _(u'rising quickly')\n elif trend > 1.5:\n return _(u'rising')\n elif trend >= 0.1:\n return _(u'rising slowly')\n elif trend < -6.0:\n return _(u'falling very rapidly')\n elif trend < -3.5:\n return _(u'falling quickly')\n elif trend < -1.5:\n return _(u'falling')\n elif trend <= -0.1:\n return _(u'falling slowly')\n return _(u'steady')", "response": "Convert pressure trend to a string as used by the UK met\n office."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef winddir_average(data, threshold, min_count, decay=1.0):\n wind_filter = pywws.process.WindFilter()\n count = 0\n for item in data:\n wind_filter.add(item)\n if item['wind_dir'] is not None:\n count += 1\n if count < min_count:\n return None\n speed, direction = wind_filter.result()\n if speed is None or speed < threshold:\n return None\n return direction * 22.5", "response": "Compute the average wind direction for a slice of data."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef winddir_text(pts):\n \"Convert wind direction from 0..15 to compass point text\"\n global _winddir_text_array\n if pts is None:\n return None\n if not isinstance(pts, int):\n pts = int(pts + 0.5) % 16\n if not _winddir_text_array:\n _ = pywws.localisation.translation.ugettext\n _winddir_text_array = (\n _(u'N'), _(u'NNE'), _(u'NE'), _(u'ENE'),\n _(u'E'), _(u'ESE'), _(u'SE'), _(u'SSE'),\n _(u'S'), _(u'SSW'), _(u'SW'), _(u'WSW'),\n _(u'W'), _(u'WNW'), _(u'NW'), _(u'NNW'),\n )\n return _winddir_text_array[pts]", "response": "Convert wind direction from 0.. 
15 to compass point text"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef wind_bft(ms):\n \"Convert wind from metres per second to Beaufort scale\"\n if ms is None:\n return None\n for bft in range(len(_bft_threshold)):\n if ms < _bft_threshold[bft]:\n return bft\n return len(_bft_threshold)", "response": "Convert wind from metres per second to Beaufort scale"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef dew_point(temp, hum):\n if temp is None or hum is None:\n return None\n a = 17.27\n b = 237.7\n gamma = ((a * temp) / (b + temp)) + math.log(float(hum) / 100.0)\n return (b * gamma) / (a - gamma)", "response": "Compute dew point using formula from\n http://en. wikidata. org."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef cadhumidex(temp, humidity):\n \"Calculate Humidity Index as per Canadian Weather Standards\"\n if temp is None or humidity is None:\n return None\n # Formulas are adapted to not use e^(...) 
with no appreciable\n # change in accuracy (0.0227%)\n saturation_pressure = (6.112 * (10.0**(7.5 * temp / (237.7 + temp))) *\n float(humidity) / 100.0)\n return temp + (0.555 * (saturation_pressure - 10.0))", "response": "Calculate Humidity Index as per Canadian Weather Standards"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef usaheatindex(temp, humidity, dew=None):\n if temp is None or humidity is None:\n return None\n if dew is None:\n dew = dew_point(temp, humidity)\n if temp < 26.7 or humidity < 40 or dew < 12.0:\n return temp\n T = (temp * 1.8) + 32.0\n R = humidity\n c_1 = -42.379\n c_2 = 2.04901523\n c_3 = 10.14333127\n c_4 = -0.22475541\n c_5 = -0.00683783\n c_6 = -0.05481717\n c_7 = 0.00122874\n c_8 = 0.00085282\n c_9 = -0.00000199\n return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) +\n (c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) +\n (c_9 * (T**2) * (R**2))) - 32.0) / 1.8", "response": "Calculate the Heat Index as per USA National Weather Service Standards\n "} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef wind_chill(temp, wind):\n if temp is None or wind is None:\n return None\n wind_kph = wind * 3.6\n if wind_kph <= 4.8 or temp > 10.0:\n return temp\n return min(13.12 + (temp * 0.6215) +\n (((0.3965 * temp) - 11.37) * (wind_kph ** 0.16)),\n temp)", "response": "Compute wind chill using formula from\n http://en. wikidata. org."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncomputes apparent temperature using formula from http://www. omgov. 
au / info / thermal_stress /", "response": "def apparent_temp(temp, rh, wind):\n \"\"\"Compute apparent temperature (real feel), using formula from\n http://www.bom.gov.au/info/thermal_stress/\n\n \"\"\"\n if temp is None or rh is None or wind is None:\n return None\n vap_press = (float(rh) / 100.0) * 6.105 * math.exp(\n 17.27 * temp / (237.7 + temp))\n return temp + (0.33 * vap_press) - (0.70 * wind) - 4.00"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ncalculates cumulus cloud base in metres using formula from https://en. wikidata. org.", "response": "def cloud_base(temp, hum):\n \"\"\"Calculate cumulus cloud base in metres, using formula from\n https://en.wikipedia.org/wiki/Cloud_base or\n https://de.wikipedia.org/wiki/Kondensationsniveau#Konvektionskondensationsniveau\n \"\"\"\n if temp is None or hum is None:\n return None\n dew_pt = dew_point(temp, hum)\n spread = float(temp) - dew_pt\n return spread * 125.0"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef upload(training_dir, algorithm_id=None, writeup=None, api_key=None, ignore_open_monitors=False):\n\n if not ignore_open_monitors:\n open_monitors = monitoring._open_monitors()\n if len(open_monitors) > 0:\n envs = [m.env.spec.id if m.env.spec else '(unknown)' for m in open_monitors]\n raise error.Error(\"Still have an open monitor on {}. 
You must run 'env.monitor.close()' before uploading.\".format(', '.join(envs)))\n\n env_info, training_episode_batch, training_video = upload_training_data(training_dir, api_key=api_key)\n env_id = env_info['env_id']\n training_episode_batch_id = training_video_id = None\n if training_episode_batch:\n training_episode_batch_id = training_episode_batch.id\n if training_video:\n training_video_id = training_video.id\n\n if logger.level <= logging.INFO:\n if training_episode_batch_id is not None and training_video_id is not None:\n logger.info('[%s] Creating evaluation object from %s with learning curve and training video', env_id, training_dir)\n elif training_episode_batch_id is not None:\n logger.info('[%s] Creating evaluation object from %s with learning curve', env_id, training_dir)\n elif training_video_id is not None:\n logger.info('[%s] Creating evaluation object from %s with training video', env_id, training_dir)\n else:\n raise error.Error(\"[%s] You didn't have any recorded training data in {}. Once you've used 'env.monitor.start(training_dir)' to start recording, you need to actually run some rollouts. Please join the community chat on https://gym.openai.com if you have any issues.\".format(env_id, training_dir))\n\n evaluation = resource.Evaluation.create(\n training_episode_batch=training_episode_batch_id,\n training_video=training_video_id,\n env=env_info['env_id'],\n algorithm={\n 'id': algorithm_id,\n },\n writeup=writeup,\n gym_version=env_info['gym_version'],\n api_key=api_key,\n# >>>>>>>>> START changes >>>>>>>>>>>>>>>>>>>>>>>>\n env_info=env_info,\n# <<<<<<<<< END changes <<<<<<<<<<<<<<<<<<<<<<<<<<\n )\n\n logger.info(\n\n \"\"\"\n****************************************************\nYou successfully uploaded your evaluation on %s to\nOpenAI Gym! 
You can find it at:\n\n %s\n\n****************************************************\n \"\"\".rstrip(), env_id, evaluation.web_url())\n\n return evaluation", "response": "Uploads the training data to the environment and returns the training data."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef load_user_envs(self):\n installed_packages = self._list_packages()\n\n # Tagging core envs\n gym_package = 'gym ({})'.format(installed_packages['gym']) if 'gym' in installed_packages else 'gym'\n core_specs = registry.all()\n for spec in core_specs:\n spec.source = 'OpenAI Gym Core Package'\n spec.package = gym_package\n\n # Loading user envs\n if not os.path.isfile(self.cache_path):\n return\n with open(self.cache_path) as cache:\n for line in cache:\n user_package, registered_envs = self._load_package(line.rstrip('\\n'), installed_packages)\n if logger.level <= logging.DEBUG:\n logger.debug('Installed %d user environments from package \"%s\"', len(registered_envs), user_package['name'])\n if self.cache_needs_update:\n self._update_cache()\n if len(self.env_ids) > 0:\n logger.info('Found and registered %d user environments.', len(self.env_ids))", "response": "Loads downloaded user environments from filesystem cache on import gym"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndownloads and register a user environment from a git repository", "response": "def pull(self, source=''):\n \"\"\"\n Downloads and registers a user environment from a git repository\n Args:\n source: the source where to download the envname (expected 'github.com/user/repo[@branch]')\n\n Note: the user environment will be registered as (username/EnvName-vVersion)\n \"\"\"\n # Checking syntax\n branch_parts = source.split('@')\n if len(branch_parts) == 2:\n branch = branch_parts[1]\n source = branch_parts[0]\n git_url = 'https://{}.git@{}'.format(source, branch)\n else:\n git_url = 
'https://{}.git'.format(source)\n\n # Validating params\n source_parts = source.split('/')\n if len(source_parts) != 3 or source_parts[0].lower() != 'github.com':\n logger.warn(\"\"\" Invalid Syntax - source must be in the format 'github.com/username/repository[@branch]'\n\nSyntax: gym.pull('github.com/username/repository')\n\nwhere username is a GitHub username, repository is the name of a GitHub repository.\"\"\")\n return\n\n username = source_parts[1]\n modified_packages = []\n\n # Installing pip package\n logger.info('Installing pip package from \"%s\"', git_url)\n packages_before = self._list_packages()\n return_code = self._run_cmd('{} install --upgrade git+{}'.format(pip_exec, git_url))\n if return_code != 0: # Failed - pip will display the error message\n return\n\n # Detecting new and upgraded packages\n packages_after = self._list_packages()\n for package_name in packages_after:\n package_version = packages_after[package_name]\n if package_name not in packages_before:\n logger.info('Installed new package: \"%s (%s)\"', package_name, package_version)\n modified_packages.append(package_name)\n\n elif LooseVersion(packages_before[package_name]) < LooseVersion(package_version):\n logger.info('Upgraded package \"%s\" from \"%s\" to \"%s\"',\n package_name, packages_before[package_name], package_version)\n modified_packages.append(package_name)\n\n elif LooseVersion(packages_before[package_name]) > LooseVersion(package_version):\n logger.warn('Package \"%s\" downgraded from \"%s\" to \"%s\". Are you sure that is what you want?',\n package_name, packages_before[package_name], package_version)\n modified_packages.append(package_name)\n\n # Package conflict - check if already installed from a different source\n for package_name in modified_packages:\n if package_name in self.user_packages and source != self.user_packages[package_name]['source']:\n logger.warn('Package conflict - The package \"%s\" from \"%s\" was already installed from \"%s\". 
'\n 'Uninstalling both packages. Please reinstall the one you want.',\n package_name, source, self.user_packages[package_name]['source'])\n self._deregister_envs_from_source(source)\n self._deregister_envs_from_source(self.user_packages[package_name]['source'])\n self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))\n del self.user_packages[package_name]\n self._update_cache()\n return\n\n # Detecting if already up-to-date\n if len(modified_packages) == 0:\n logger.warn('The user environments for \"%s\" are already up-to-date (no new version detected).', source)\n return\n\n # De-register envs with same source\n self._deregister_envs_from_source(source)\n\n # Loading new packages\n new_envs = set([])\n uninstall_packages = []\n for package_name in modified_packages:\n json_line = json.dumps({'name': package_name, 'version': packages_after[package_name], 'source': source})\n user_package, registered_envs = self._load_package(json_line, packages_after)\n for new_env in registered_envs:\n if not new_env.lower().startswith('{}/'.format(username.lower())):\n if len(uninstall_packages) == 0: # We don't need to repeat the message multiple times\n logger.warn('This package does not respect the naming convention and will be uninstalled to avoid conflicts. 
'\n 'Expected user environment to start with \"{}/\", but got \"{}\" instead.'.format(username, new_env))\n uninstall_packages.append(package_name)\n new_envs = new_envs | registered_envs\n\n # Removing packages and deregistering envs if they don't respect naming convention\n if len(uninstall_packages) > 0:\n self._deregister_envs_from_source(source)\n for package_name in uninstall_packages:\n self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))\n return\n\n # Updating cache\n self._update_cache()\n\n # Displaying results\n logger.info('--------------------------------------------------')\n if len(new_envs) > 0:\n for env in sorted(new_envs, key=lambda s: s.lower()):\n logger.info('Successfully registered the environment: \"%s\"', env)\n else:\n logger.info('No environments have been registered. The following packages were modified: %s', ','.join(modified_packages))\n return"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _load_package(self, json_line, installed_packages):\n if len(json_line) == 0:\n return {}, set([])\n\n valid_json = False\n try:\n user_package = json.loads(json_line)\n valid_json = True\n except ValueError:\n user_package = {}\n\n package_name = user_package['name'] if 'name' in user_package else None\n module_name = package_name.replace('-', '_') if package_name is not None else ''\n envs_before = set(registry.list())\n\n if not valid_json or package_name is None:\n self.cache_needs_update = True\n logger.warn('Unable to load user environments. Try deleting your cache '\n 'file \"%s\" if this problem persists. \\n\\nLine: %s', self.cache_path, json_line)\n return {}, set([])\n elif package_name not in installed_packages:\n self.cache_needs_update = True\n logger.warn('The package \"%s\" does not seem to be installed anymore. 
User environments from this '\n 'package will not be registered, and the package will no longer be loaded on `import gym`', package_name)\n elif module_name in sys.modules:\n self.cache_needs_update = True\n try:\n reload_module(sys.modules[module_name])\n except ImportError:\n if 'gym' in package_name: # To avoid uninstalling failing dependencies\n logger.warn('Unable to reload the module \"%s\" from package \"%s\" (%s). This is usually caused by a '\n 'invalid pip package. The package will be uninstalled and no longer be loaded on `import gym`.\\n',\n module_name, package_name, installed_packages[package_name])\n traceback.print_exc(file=sys.stdout)\n sys.stdout.write('\\n')\n self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))\n else:\n try:\n __import__(module_name)\n except ImportError:\n if 'gym' in package_name: # To avoid uninstalling failing dependencies\n self.cache_needs_update = True\n logger.warn('Unable to import the module \"%s\" from package \"%s\" (%s). This is usually caused by a '\n 'invalid pip package. 
The package will be uninstalled and no longer be loaded on `import gym`.\\n',\n module_name, package_name, installed_packages[package_name])\n traceback.print_exc(file=sys.stdout)\n sys.stdout.write('\\n')\n self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))\n\n envs_after = set(registry.list())\n registered_envs = envs_after - envs_before\n if len(registered_envs) > 0:\n self.user_packages[package_name] = user_package\n for new_env in registered_envs:\n new_spec = registry.spec(new_env)\n new_spec.source = user_package['source']\n new_spec.package = '{} ({})'.format(user_package['name'], user_package['version'])\n self.env_ids.add(new_env.lower())\n return user_package, registered_envs", "response": "Load a user package from a JSON file and return the user_package name version source list of environments registered when the package was loaded."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef make(self):\n if self._entry_point is None:\n raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))\n\n cls = load(self._entry_point)\n env = cls(**self._kwargs)\n\n # Make the enviroment aware of which spec it came from.\n env.spec = self\n env = env.build(extra_wrappers=self._wrappers)\n\n return env", "response": "Instantiates an instance of the environment with appropriate kwargs"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nparsing a path into hosts and path tuple.", "response": "def _parse_path(self, path):\n \"\"\"Return (hosts, path) tuple\"\"\"\n # Support specifying another host via hdfs://host:port/path syntax\n # We ignore the scheme and piece together the query and fragment\n # Note that HDFS URIs are not URL encoded, so a '?' 
or a '#' in the URI is part of the\n # path\n parts = urlsplit(path, allow_fragments=False)\n if not parts.path.startswith('/'):\n raise ValueError(\"Path must be absolute, was given {}\".format(path))\n if parts.scheme not in ('', 'hdfs', 'hftp', 'webhdfs'):\n warnings.warn(\"Unexpected scheme {}\".format(parts.scheme))\n assert not parts.fragment\n path = parts.path\n if parts.query:\n path += '?' + parts.query\n if parts.netloc:\n hosts = self._parse_hosts(parts.netloc)\n else:\n hosts = self.hosts\n return hosts, path"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _record_last_active(self, host):\n if host in self.hosts: # this check is for when user passes a host at request time\n # Keep this thread safe: set hosts atomically and update it before the timestamp\n self.hosts = [host] + [h for h in self.hosts if h != host]\n self._last_time_recorded_active = time.time()", "response": "Record the last active entry in our hosts list."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nmakes a request against the NameNodes WebHDFS server.", "response": "def _request(self, method, path, op, expected_status=httplib.OK, **kwargs):\n \"\"\"Make a WebHDFS request against the NameNodes\n\n This function handles NameNode failover and error checking.\n All kwargs are passed as query params to the WebHDFS server.\n \"\"\"\n hosts, path = self._parse_path(path)\n _transform_user_name_key(kwargs)\n kwargs.setdefault('user.name', self.user_name)\n\n formatted_args = ' '.join('{}={}'.format(*t) for t in kwargs.items())\n _logger.info(\"%s %s %s %s\", op, path, formatted_args, ','.join(hosts))\n kwargs['op'] = op\n for i in range(self.max_tries):\n log_level = logging.DEBUG if i < self.max_tries - 1 else logging.WARNING\n for host in hosts:\n try:\n response = self._requests_session.request(\n method,\n 'http://{}{}{}'.format(host, WEBHDFS_PATH, url_quote(path.encode('utf-8'))),\n 
params=kwargs, timeout=self.timeout, allow_redirects=False,\n **self._requests_kwargs\n )\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):\n _logger.log(log_level, \"Failed to reach to %s (attempt %d/%d)\",\n host, i + 1, self.max_tries, exc_info=True)\n continue\n try:\n _check_response(response, expected_status)\n except (HdfsRetriableException, HdfsStandbyException):\n _logger.log(log_level, \"%s is in startup or standby mode (attempt %d/%d)\",\n host, i + 1, self.max_tries, exc_info=True)\n continue\n # Note: standby NN can still return basic validation errors, so non-StandbyException\n # does not necessarily mean we have the active NN.\n self._record_last_active(host)\n return response\n if i != self.max_tries - 1:\n time.sleep(self.retry_delay)\n raise HdfsNoServerException(\"Could not use any of the given hosts\")"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create(self, path, data, **kwargs):\n metadata_response = self._put(\n path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)\n assert not metadata_response.content\n data_response = self._requests_session.put(\n metadata_response.headers['location'], data=data, **self._requests_kwargs)\n _check_response(data_response, expected_status=httplib.CREATED)\n assert not data_response.content", "response": "Create a file at the given path."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef append(self, path, data, **kwargs):\n metadata_response = self._post(\n path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)\n data_response = self._requests_session.post(\n metadata_response.headers['location'], data=data, **self._requests_kwargs)\n _check_response(data_response)\n assert not data_response.content", "response": "Append to the given file."} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 
to\nconcatenate existing files together.", "response": "def concat(self, target, sources, **kwargs):\n \"\"\"Concat existing files together.\n\n For preconditions, see\n https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources\n\n :param target: the path to the target destination.\n :param sources: the paths to the sources to use for the concatenation.\n :type sources: list\n \"\"\"\n if isinstance(sources, basestring):\n raise ValueError(\"sources should be a list\")\n if any(',' in s for s in sources):\n raise NotImplementedError(\"WebHDFS does not support commas in concat\")\n response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs)\n assert not response.content"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef open(self, path, **kwargs):\n metadata_response = self._get(\n path, 'OPEN', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)\n data_response = self._requests_session.get(\n metadata_response.headers['location'], stream=True, **self._requests_kwargs)\n _check_response(data_response)\n return data_response.raw", "response": "Open a file - like object for reading the given HDFS path."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef create_symlink(self, link, destination, **kwargs):\n response = self._put(link, 'CREATESYMLINK', destination=destination, **kwargs)\n assert not response.content", "response": "Create a symbolic link at link pointing to destination."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef rename(self, path, destination, **kwargs):\n return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean']", "response": "Renames Path src to Path dst."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nreturns a FileStatus object 
that represents the path.", "response": "def get_file_status(self, path, **kwargs):\n \"\"\"Return a :py:class:`FileStatus` object that represents the path.\"\"\"\n return FileStatus(**_json(self._get(path, 'GETFILESTATUS', **kwargs))['FileStatus'])"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nlisting the statuses of the files and directories in the given path.", "response": "def list_status(self, path, **kwargs):\n \"\"\"List the statuses of the files/directories in the given path if the path is a directory.\n\n :rtype: ``list`` of :py:class:`FileStatus` objects\n \"\"\"\n return [\n FileStatus(**item) for item in\n _json(self._get(path, 'LISTSTATUS', **kwargs))['FileStatuses']['FileStatus']\n ]"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_content_summary(self, path, **kwargs):\n return ContentSummary(\n **_json(self._get(path, 'GETCONTENTSUMMARY', **kwargs))['ContentSummary'])", "response": "Return the ContentSummary of a given Path."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget the checksum of a file.", "response": "def get_file_checksum(self, path, **kwargs):\n \"\"\"Get the checksum of a file.\n\n :rtype: :py:class:`FileChecksum`\n \"\"\"\n metadata_response = self._get(\n path, 'GETFILECHECKSUM', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)\n assert not metadata_response.content\n data_response = self._requests_session.get(\n metadata_response.headers['location'], **self._requests_kwargs)\n _check_response(data_response)\n return FileChecksum(**_json(data_response)['FileChecksum'])"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_permission(self, path, **kwargs):\n response = self._put(path, 'SETPERMISSION', **kwargs)\n assert not response.content", "response": "Set the permission of a file or directory."} {"SOURCE": "codesearchnet", "instruction": 
"Implement a function in Python 3 to\nset the owner of a file or directory.", "response": "def set_owner(self, path, **kwargs):\n \"\"\"Set owner of a path (i.e. a file or a directory).\n\n The parameters owner and group cannot both be null.\n\n :param owner: user\n :param group: group\n \"\"\"\n response = self._put(path, 'SETOWNER', **kwargs)\n assert not response.content"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nset the modification time and accesstime of a file.", "response": "def set_times(self, path, **kwargs):\n \"\"\"Set access time of a file.\n\n :param modificationtime: Set the modification time of this file. The number of milliseconds\n since Jan 1, 1970.\n :type modificationtime: long\n :param accesstime: Set the access time of this file. The number of milliseconds since Jan 1\n 1970.\n :type accesstime: long\n \"\"\"\n response = self._put(path, 'SETTIMES', **kwargs)\n assert not response.content"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nsets an xattr of a file or directory.", "response": "def set_xattr(self, path, xattr_name, xattr_value, flag, **kwargs):\n \"\"\"Set an xattr of a file or directory.\n\n :param xattr_name: The name must be prefixed with the namespace followed by ``.``. 
For\n example, ``user.attr``.\n :param flag: ``CREATE`` or ``REPLACE``\n \"\"\"\n kwargs['xattr.name'] = xattr_name\n kwargs['xattr.value'] = xattr_value\n response = self._put(path, 'SETXATTR', flag=flag, **kwargs)\n assert not response.content"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nremove an xattr of a file or directory.", "response": "def remove_xattr(self, path, xattr_name, **kwargs):\n \"\"\"Remove an xattr of a file or directory.\"\"\"\n kwargs['xattr.name'] = xattr_name\n response = self._put(path, 'REMOVEXATTR', **kwargs)\n assert not response.content"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget one or more xattr values for a file or directory.", "response": "def get_xattrs(self, path, xattr_name=None, encoding='text', **kwargs):\n \"\"\"Get one or more xattr values for a file or directory.\n\n :param xattr_name: ``str`` to get one attribute, ``list`` to get multiple attributes,\n ``None`` to get all attributes.\n :param encoding: ``text`` | ``hex`` | ``base64``, defaults to ``text``\n\n :returns: Dictionary mapping xattr name to value. With text encoding, the value will be a\n unicode string. 
With hex or base64 encoding, the value will be a byte array.\n :rtype: dict\n \"\"\"\n kwargs['xattr.name'] = xattr_name\n json = _json(self._get(path, 'GETXATTRS', encoding=encoding, **kwargs))['XAttrs']\n # Decode the result\n result = {}\n for attr in json:\n k = attr['name']\n v = attr['value']\n if v is None:\n result[k] = None\n elif encoding == 'text':\n assert attr['value'].startswith('\"') and attr['value'].endswith('\"')\n result[k] = v[1:-1]\n elif encoding == 'hex':\n assert attr['value'].startswith('0x')\n # older python demands bytes, so we have to ascii encode\n result[k] = binascii.unhexlify(v[2:].encode('ascii'))\n elif encoding == 'base64':\n assert attr['value'].startswith('0s')\n # older python demands bytes, so we have to ascii encode\n result[k] = base64.b64decode(v[2:].encode('ascii'))\n else:\n warnings.warn(\"Unexpected encoding {}\".format(encoding))\n result[k] = v\n return result"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ngets all of the xattr names for a file or directory.", "response": "def list_xattrs(self, path, **kwargs):\n \"\"\"Get all of the xattr names for a file or directory.\n\n :rtype: list\n \"\"\"\n return simplejson.loads(_json(self._get(path, 'LISTXATTRS', **kwargs))['XAttrNames'])"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef delete_snapshot(self, path, snapshotname, **kwargs):\n response = self._delete(path, 'DELETESNAPSHOT', snapshotname=snapshotname, **kwargs)\n assert not response.content", "response": "Delete a snapshot of a directory"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef listdir(self, path, **kwargs):\n statuses = self.list_status(path, **kwargs)\n if len(statuses) == 1 and statuses[0].pathSuffix == '' and statuses[0].type == 'FILE':\n raise NotADirectoryError('Not a directory: {!r}'.format(path))\n return [f.pathSuffix for f 
in statuses]", "response": "Return a list containing names of files in the given path"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nreturn True if the given path exists", "response": "def exists(self, path, **kwargs):\n \"\"\"Return true if the given path exists\"\"\"\n try:\n self.get_file_status(path, **kwargs)\n return True\n except HdfsFileNotFoundException:\n return False"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nseeing ``os.walk`` for documentation", "response": "def walk(self, top, topdown=True, onerror=None, **kwargs):\n \"\"\"See ``os.walk`` for documentation\"\"\"\n try:\n listing = self.list_status(top, **kwargs)\n except HdfsException as e:\n if onerror is not None:\n onerror(e)\n return\n\n dirnames, filenames = [], []\n for f in listing:\n if f.type == 'DIRECTORY':\n dirnames.append(f.pathSuffix)\n elif f.type == 'FILE':\n filenames.append(f.pathSuffix)\n else: # pragma: no cover\n raise AssertionError(\"Unexpected type {}\".format(f.type))\n\n if topdown:\n yield top, dirnames, filenames\n for name in dirnames:\n new_path = posixpath.join(top, name)\n for x in self.walk(new_path, topdown, onerror, **kwargs):\n yield x\n if not topdown:\n yield top, dirnames, filenames"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\ncopying a single file from the local file system to dest.", "response": "def copy_from_local(self, localsrc, dest, **kwargs):\n \"\"\"Copy a single file from the local file system to ``dest``\n\n Takes all arguments that :py:meth:`create` takes.\n \"\"\"\n with io.open(localsrc, 'rb') as f:\n self.create(dest, f, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef copy_to_local(self, src, localdest, **kwargs):\n with self.open(src, **kwargs) as fsrc:\n with io.open(localdest, 'wb') as fdst:\n shutil.copyfileobj(fsrc, fdst)", "response": "Copy a single file from src to local 
file system\n\n Takes all arguments that open takes."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef get_active_namenode(self, max_staleness=None):\n if (max_staleness is None or\n self._last_time_recorded_active is None or\n self._last_time_recorded_active < time.time() - max_staleness):\n # Make a cheap request and rely on the reordering in self._record_last_active\n self.get_file_status('/')\n return self.hosts[0]", "response": "Return the address of the currently active NameNode."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef next(self):\n if not self.__has_more():\n raise StopIteration()\n else:\n return javabridge.get_env().get_string(self.__next())", "response": "Reads the next instance of the set of attributes."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef main():\n\n parser = argparse.ArgumentParser(\n description='Executes an associator from the command-line. 
Calls JVM start/stop automatically.')\n parser.add_argument(\"-j\", metavar=\"classpath\", dest=\"classpath\", help=\"additional classpath, jars/directories\")\n parser.add_argument(\"-X\", metavar=\"heap\", dest=\"heap\", help=\"max heap size for jvm, e.g., 512m\")\n parser.add_argument(\"-t\", metavar=\"train\", dest=\"train\", required=True, help=\"training set file\")\n parser.add_argument(\"associator\", help=\"associator classname, e.g., weka.associations.Apriori\")\n parser.add_argument(\"option\", nargs=argparse.REMAINDER, help=\"additional associator options\")\n parsed = parser.parse_args()\n jars = []\n if parsed.classpath is not None:\n jars = parsed.classpath.split(os.pathsep)\n\n jvm.start(jars, max_heap_size=parsed.heap, packages=True)\n\n logger.debug(\"Commandline: \" + join_options(sys.argv[1:]))\n\n try:\n associator = Associator(classname=parsed.associator)\n if len(parsed.option) > 0:\n associator.options = parsed.option\n loader = converters.loader_for_file(parsed.train)\n data = loader.load_file(parsed.train)\n associator.build_associations(data)\n print(str(associator))\n except Exception, e:\n print(e)\n finally:\n jvm.stop()", "response": "Main entry point for the associator."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nreturns the next rule object in the list.", "response": "def next(self):\n \"\"\"\n Returns the next rule.\n\n :return: the next rule object\n :rtype: AssociationRule\n \"\"\"\n if self.index < self.length:\n index = self.index\n self.index += 1\n return self.rules[index]\n else:\n raise StopIteration()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nbuilding the tree for the given actor.", "response": "def _build_tree(self, actor, content):\n \"\"\"\n Builds the tree for the given actor.\n\n :param actor: the actor to process\n :type actor: Actor\n :param content: the rows of the tree collected so far\n :type content: list\n \"\"\"\n depth = 
actor.depth\n row = \"\"\n for i in xrange(depth - 1):\n row += \"| \"\n if depth > 0:\n row += \"|-\"\n name = actor.name\n if name != actor.__class__.__name__:\n name = actor.__class__.__name__ + \" '\" + name + \"'\"\n row += name\n quickinfo = actor.quickinfo\n if quickinfo is not None:\n row += \" [\" + quickinfo + \"]\"\n content.append(row)\n\n if isinstance(actor, ActorHandler):\n for sub in actor.actors:\n self._build_tree(sub, content)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef check_actors(self):\n actors = []\n for actor in self.owner.actors:\n if actor.skip:\n continue\n actors.append(actor)\n if len(actors) == 0:\n return\n if not self.allow_source and base.is_source(actors[0]):\n raise Exception(\"Actor '\" + actors[0].full_name + \"' is a source, but no sources allowed!\")\n for i in xrange(1, len(actors)):\n if not isinstance(actors[i], InputConsumer):\n raise Exception(\"Actor does not accept any input: \" + actors[i].full_name)", "response": "Checks the actors of the owner. 
Raises an exception if invalid."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nsaving the flow to a JSON file.", "response": "def save(cls, flow, fname):\n \"\"\"\n Saves the flow to a JSON file.\n\n :param flow: the flow to save\n :type flow: Flow\n :param fname: the file to load\n :type fname: str\n :return: None if successful, otherwise error message\n :rtype: str\n \"\"\"\n result = None\n try:\n f = open(fname, 'w')\n f.write(flow.to_json())\n f.close()\n except Exception, e:\n result = str(e)\n return result"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef setup(self):\n result = super(BranchDirector, self).setup()\n if result is None:\n try:\n self.check_actors()\n except Exception, e:\n result = str(e)\n return result", "response": "Performs some checks.\n\n :return: None if successful, otherwise error message.\n :rtype: str"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef ranked_attributes(self):\n matrix = javabridge.call(self.jobject, \"rankedAttributes\", \"()[[D\")\n if matrix is None:\n return None\n else:\n return arrays.double_matrix_to_ndarray(matrix)", "response": "Returns the matrix of ranked attributes from the last run."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef next(self):\n if self.index < self.length:\n index = self.index\n self.index += 1\n return self.data[index]\n else:\n raise StopIteration()", "response": "Returns the next element from the array."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef options(self):\n if self.is_optionhandler:\n return types.string_array_to_list(javabridge.call(self.jobject, \"getOptions\", \"()[Ljava/lang/String;\"))\n else:\n return []", "response": "Gets the currently set options as list."} {"SOURCE": "codesearchnet", 
"instruction": "Can you generate the documentation for the following Python 3 function\ndef options(self, options):\n if self.is_optionhandler:\n javabridge.call(self.jobject, \"setOptions\", \"([Ljava/lang/String;)V\", types.string_list_to_array(options))", "response": "Sets the command - line options."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nplot the cluster assignments against the specified attributes.", "response": "def plot_cluster_assignments(evl, data, atts=None, inst_no=False, size=10, title=None, outfile=None, wait=True):\n \"\"\"\n Plots the cluster assignments against the specified attributes.\n\n TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html\n\n :param evl: the cluster evaluation to obtain the cluster assignments from\n :type evl: ClusterEvaluation\n :param data: the dataset the clusterer was evaluated against\n :type data: Instances\n :param atts: the list of attribute indices to plot, None for all\n :type atts: list\n :param inst_no: whether to include a fake attribute with the instance number\n :type inst_no: bool\n :param size: the size of the circles in point\n :type size: int\n :param title: an optional title\n :type title: str\n :param outfile: the (optional) file to save the generated plot to. 
The extension determines the file format.\n :type outfile: str\n :param wait: whether to wait for the user to close the plot\n :type wait: bool\n \"\"\"\n if not plot.matplotlib_available:\n logger.error(\"Matplotlib is not installed, plotting unavailable!\")\n return\n\n fig = plt.figure()\n\n if data.class_index == -1:\n c = None\n else:\n c = []\n for i in xrange(data.num_instances):\n inst = data.get_instance(i)\n c.append(inst.get_value(inst.class_index))\n\n if atts is None:\n atts = []\n for i in xrange(data.num_attributes):\n atts.append(i)\n\n num_plots = len(atts)\n if inst_no:\n num_plots += 1\n\n clusters = evl.cluster_assignments\n\n for index, att in enumerate(atts):\n x = data.values(att)\n ax = fig.add_subplot(\n 1, num_plots, index + 1)\n if c is None:\n ax.scatter(clusters, x, s=size, alpha=0.5)\n else:\n ax.scatter(clusters, x, c=c, s=size, alpha=0.5)\n ax.set_xlabel(\"Clusters\")\n ax.set_title(data.attribute(att).name)\n ax.get_xaxis().set_ticks(list(set(clusters)))\n ax.grid(True)\n\n if inst_no:\n x = []\n for i in xrange(data.num_instances):\n x.append(i+1)\n ax = fig.add_subplot(\n 1, num_plots, num_plots)\n if c is None:\n ax.scatter(clusters, x, s=size, alpha=0.5)\n else:\n ax.scatter(clusters, x, c=c, s=size, alpha=0.5)\n ax.set_xlabel(\"Clusters\")\n ax.set_title(\"Instance number\")\n ax.get_xaxis().set_ticks(list(set(clusters)))\n ax.grid(True)\n\n if title is None:\n title = data.relationname\n fig.canvas.set_window_title(title)\n plt.draw()\n if not outfile is None:\n plt.savefig(outfile)\n if wait:\n plt.show()"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nwrites the list of objects to disk.", "response": "def write_all(filename, jobjects):\n \"\"\"\n Serializes the list of objects to disk. 
JavaObject instances get automatically unwrapped.\n\n :param filename: the file to serialize the object to\n :type filename: str\n :param jobjects: the list of objects to serialize\n :type jobjects: list\n \"\"\"\n array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class(\"java/lang/Object\"))\n for i in xrange(len(jobjects)):\n obj = jobjects[i]\n if isinstance(obj, JavaObject):\n obj = obj.jobject\n javabridge.get_env().set_object_array_element(array, i, obj)\n javabridge.static_call(\n \"Lweka/core/SerializationHelper;\", \"writeAll\",\n \"(Ljava/lang/String;[Ljava/lang/Object;)V\",\n filename, array)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef main():\n parser = argparse.ArgumentParser(\n description='Executes a filter from the command-line. Calls JVM start/stop automatically.')\n parser.add_argument(\"-j\", metavar=\"classpath\", dest=\"classpath\", help=\"additional classpath, jars/directories\")\n parser.add_argument(\"-X\", metavar=\"heap\", dest=\"heap\", help=\"max heap size for jvm, e.g., 512m\")\n parser.add_argument(\"-i\", metavar=\"input1\", dest=\"input1\", required=True, help=\"input file 1\")\n parser.add_argument(\"-o\", metavar=\"output1\", dest=\"output1\", required=True, help=\"output file 1\")\n parser.add_argument(\"-r\", metavar=\"input2\", dest=\"input2\", help=\"input file 2\")\n parser.add_argument(\"-s\", metavar=\"output2\", dest=\"output2\", help=\"output file 2\")\n parser.add_argument(\"-c\", metavar=\"classindex\", default=\"-1\", dest=\"classindex\",\n help=\"1-based class attribute index\")\n parser.add_argument(\"filter\", help=\"filter classname, e.g., weka.filters.AllFilter\")\n parser.add_argument(\"option\", nargs=argparse.REMAINDER, help=\"additional filter options\")\n parsed = parser.parse_args()\n if parsed.input2 is None and parsed.output2 is not None:\n raise Exception(\"No second input file provided ('-r ...')!\")\n\n jars = []\n 
if parsed.classpath is not None:\n jars = parsed.classpath.split(os.pathsep)\n params = []\n if parsed.input1 is not None:\n params.extend([\"-i\", parsed.input1])\n if parsed.output1 is not None:\n params.extend([\"-o\", parsed.output1])\n if parsed.input2 is not None:\n params.extend([\"-r\", parsed.input2])\n if parsed.output2 is not None:\n params.extend([\"-s\", parsed.output2])\n if parsed.classindex is not None:\n params.extend([\"-c\", parsed.classindex])\n\n jvm.start(jars, max_heap_size=parsed.heap, packages=True)\n\n logger.debug(\"Commandline: \" + join_options(sys.argv[1:]))\n\n try:\n flter = Filter(parsed.filter)\n if len(parsed.option) > 0:\n flter.options = parsed.option\n loader = Loader(classname=\"weka.core.converters.ArffLoader\")\n in1 = loader.load_file(parsed.input1)\n cls = parsed.classindex\n if str(parsed.classindex) == \"first\":\n cls = \"0\"\n if str(parsed.classindex) == \"last\":\n cls = str(in1.num_attributes - 1)\n in1.class_index = int(cls)\n flter.inputformat(in1)\n out1 = flter.filter(in1)\n saver = Saver(classname=\"weka.core.converters.ArffSaver\")\n saver.save_file(out1, parsed.output1)\n if parsed.input2 is not None:\n in2 = loader.load_file(parsed.input2)\n in2.class_index = int(cls)\n out2 = flter.filter(in2)\n saver.save_file(out2, parsed.output2)\n except Exception, e:\n print(e)\n finally:\n jvm.stop()", "response": "Entry point for the\n command - line interface."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef next(self):\n result = javabridge.call(\n self.loader.jobject, \"getNextInstance\",\n \"(Lweka/core/Instances;)Lweka/core/Instance;\", self.structure.jobject)\n if result is None:\n raise StopIteration()\n else:\n return Instance(result)", "response": "Reads the next dataset row."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef plot_classifier_errors(predictions, absolute=True, max_relative_size=50, 
absolute_size=50, title=None,\n outfile=None, wait=True):\n \"\"\"\n Plots the classifers for the given list of predictions.\n\n TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html\n\n :param predictions: the predictions to plot\n :type predictions: list\n :param absolute: whether to use absolute errors as size or relative ones\n :type absolute: bool\n :param max_relative_size: the maximum size in point in case of relative mode\n :type max_relative_size: int\n :param absolute_size: the size in point in case of absolute mode\n :type absolute_size: int\n :param title: an optional title\n :type title: str\n :param outfile: the output file, ignored if None\n :type outfile: str\n :param wait: whether to wait for the user to close the plot\n :type wait: bool\n \"\"\"\n if not plot.matplotlib_available:\n logger.error(\"Matplotlib is not installed, plotting unavailable!\")\n return\n actual = []\n predicted = []\n error = None\n cls = None\n for pred in predictions:\n actual.append(pred.actual)\n predicted.append(pred.predicted)\n if isinstance(pred, NumericPrediction):\n if error is None:\n error = []\n error.append(abs(pred.error))\n elif isinstance(pred, NominalPrediction):\n if cls is None:\n cls = []\n if pred.actual != pred.predicted:\n cls.append(1)\n else:\n cls.append(0)\n fig, ax = plt.subplots()\n if error is None and cls is None:\n ax.scatter(actual, predicted, s=absolute_size, alpha=0.5)\n elif cls is not None:\n ax.scatter(actual, predicted, c=cls, s=absolute_size, alpha=0.5)\n elif error is not None:\n if not absolute:\n min_err = min(error)\n max_err = max(error)\n factor = (max_err - min_err) / max_relative_size\n for i in xrange(len(error)):\n error[i] = error[i] / factor * max_relative_size\n ax.scatter(actual, predicted, s=error, alpha=0.5)\n ax.set_xlabel(\"actual\")\n ax.set_ylabel(\"predicted\")\n if title is None:\n title = \"Classifier errors\"\n ax.set_title(title)\n ax.plot(ax.get_xlim(), ax.get_ylim(), ls=\"--\", 
c=\"0.3\")\n ax.grid(True)\n fig.canvas.set_window_title(title)\n plt.draw()\n if outfile is not None:\n plt.savefig(outfile)\n if wait:\n plt.show()", "response": "Plots the classifers for the given list of predictions."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef main():\n parser = argparse.ArgumentParser(\n description='Performs classification/regression from the command-line. Calls JVM start/stop automatically.')\n parser.add_argument(\"-j\", metavar=\"classpath\", dest=\"classpath\", help=\"additional classpath, jars/directories\")\n parser.add_argument(\"-X\", metavar=\"heap\", dest=\"heap\", help=\"max heap size for jvm, e.g., 512m\")\n parser.add_argument(\"-t\", metavar=\"train\", dest=\"train\", required=True, help=\"Training set file\")\n parser.add_argument(\"-T\", metavar=\"test\", dest=\"test\", help=\"Test set file\")\n parser.add_argument(\"-c\", metavar=\"class index\", dest=\"classindex\", help=\"1-based class attribute index\")\n parser.add_argument(\"-d\", metavar=\"outmodel\", dest=\"outmodel\", help=\"model output file name\")\n parser.add_argument(\"-l\", metavar=\"inmodel\", dest=\"inmodel\", help=\"model input file name\")\n parser.add_argument(\"-x\", metavar=\"num folds\", dest=\"numfolds\", help=\"number of folds for cross-validation\")\n parser.add_argument(\"-s\", metavar=\"seed\", dest=\"seed\", help=\"seed value for randomization\")\n parser.add_argument(\"-v\", action=\"store_true\", dest=\"notrainstats\", help=\"no statistics for training\")\n parser.add_argument(\"-o\", action=\"store_true\", dest=\"onlystats\", help=\"only statistics, don't output model\")\n parser.add_argument(\"-i\", action=\"store_true\", dest=\"irstats\", help=\"output information retrieval statistics\")\n parser.add_argument(\"-k\", action=\"store_true\", dest=\"itstats\", help=\"output information theoretic statistics\")\n parser.add_argument(\"-m\", metavar=\"costmatrix\", 
dest=\"costmatrix\", help=\"cost matrix file\")\n parser.add_argument(\"-g\", metavar=\"graph\", dest=\"graph\", help=\"output file for graph (if supported)\")\n parser.add_argument(\"classifier\", help=\"classifier classname, e.g., weka.classifiers.trees.J48\")\n parser.add_argument(\"option\", nargs=argparse.REMAINDER, help=\"additional classifier options\")\n parsed = parser.parse_args()\n jars = []\n if parsed.classpath is not None:\n jars = parsed.classpath.split(os.pathsep)\n params = []\n if parsed.train is not None:\n params.extend([\"-t\", parsed.train])\n if parsed.test is not None:\n params.extend([\"-T\", parsed.test])\n if parsed.classindex is not None:\n params.extend([\"-c\", parsed.classindex])\n if parsed.outmodel is not None:\n params.extend([\"-d\", parsed.outmodel])\n if parsed.inmodel is not None:\n params.extend([\"-l\", parsed.inmodel])\n if parsed.numfolds is not None:\n params.extend([\"-x\", parsed.numfolds])\n if parsed.seed is not None:\n params.extend([\"-s\", parsed.seed])\n if parsed.notrainstats:\n params.append(\"-v\")\n if parsed.onlystats:\n params.append(\"-o\")\n if parsed.irstats:\n params.append(\"-i\")\n if parsed.itstats:\n params.append(\"-k\")\n if parsed.costmatrix is not None:\n params.extend([\"-m\", parsed.costmatrix])\n if parsed.graph is not None:\n params.extend([\"-g\", parsed.graph])\n\n jvm.start(jars, max_heap_size=parsed.heap, packages=True)\n\n logger.debug(\"Commandline: \" + join_options(sys.argv[1:]))\n\n try:\n classifier = Classifier(classname=parsed.classifier)\n if len(parsed.option) > 0:\n classifier.options = parsed.option\n print(Evaluation.evaluate_model(classifier, params))\n except Exception, e:\n print(e)\n finally:\n jvm.stop()", "response": "Main function for the\n command - line tool."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nreturns the class distributions for the Instances object.", "response": "def distributions_for_instances(self, 
data):\n \"\"\"\n Peforms predictions, returning the class distributions.\n\n :param data: the Instances to get the class distributions for\n :type data: Instances\n :return: the class distribution matrix, None if not a batch predictor\n :rtype: ndarray\n \"\"\"\n if self.is_batchpredictor:\n return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))\n else:\n return None"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nreturn the internal values of this attribute from all the instance objects.", "response": "def values(self, index):\n \"\"\"\n Returns the internal values of this attribute from all the instance objects.\n\n :return: the values as numpy array\n :rtype: list\n \"\"\"\n values = []\n for i in xrange(self.num_instances):\n inst = self.get_instance(i)\n values.append(inst.get_value(index))\n return numpy.array(values)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nreturning a new instance set with the same set of instances.", "response": "def append_instances(cls, inst1, inst2):\n \"\"\"\n Merges the two datasets (one-after-the-other). 
Throws an exception if the datasets aren't compatible.\n\n :param inst1: the first dataset\n :type inst1: Instances\n :param inst2: the first dataset\n :type inst2: Instances\n :return: the combined dataset\n :rtype: Instances\n \"\"\"\n msg = inst1.equal_headers(inst2)\n if msg is not None:\n raise Exception(\"Cannot appent instances: \" + msg)\n result = cls.copy_instances(inst1)\n for i in xrange(inst2.num_instances):\n result.add_instance(inst2.get_instance(i))\n return result"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef values(self):\n enm = javabridge.call(self.jobject, \"enumerateValues\", \"()Ljava/util/Enumeration;\")\n if enm is None:\n return None\n else:\n return types.enumeration_to_list(enm)", "response": "Returns the labels strings or relation - values."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef next(self):\n if self.row < self.data.num_instances:\n index = self.row\n self.row += 1\n return self.data.get_instance(index)\n else:\n raise StopIteration()", "response": "Returns the next row from the Instances object."} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreturn the next attribute from the Instances object.", "response": "def next(self):\n \"\"\"\n Returns the next attribute from the Instances object.\n\n :return: the next Attribute object\n :rtype: Attribute\n \"\"\"\n if self.col < self.data.num_attributes:\n index = self.col\n self.col += 1\n return self.data.attribute(index)\n else:\n raise StopIteration()"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncheck that the new file introduced is a python source file", "response": "def check(self, diff):\n \"\"\"Check that the new file introduced is a python source file\"\"\"\n path = diff.b_path\n assert any(\n path.endswith(ext)\n for ext in importlib.machinery.SOURCE_SUFFIXES\n )"} 
{"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nchecking that the new file is within the contrib subdirectory", "response": "def check(self, diff):\n \"\"\"Check that the new file is within the contrib subdirectory\"\"\"\n path = diff.b_path\n contrib_path = self.project.contrib_module_path\n assert pathlib.Path(contrib_path) in pathlib.Path(path).parents"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef check(self, diff):\n relative_path = relative_to_contrib(diff, self.project)\n subpackage_name = relative_path.parts[0]\n assert re_test(SUBPACKAGE_NAME_REGEX, subpackage_name)", "response": "Check that the name of the subpackage within contrib is valid."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncheck that the new file introduced is at the proper depth", "response": "def check(self, diff):\n \"\"\"Check that the new file introduced is at the proper depth\n\n The proper depth is 2 (contrib/user_example/new_file.py)\n \"\"\"\n relative_path = relative_to_contrib(diff, self.project)\n assert len(relative_path.parts) == 2"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef check(self, diff):\n path = pathlib.Path(diff.b_path)\n filename = path.parts[-1]\n if filename == '__init__.py':\n abspath = self.project.path.joinpath(path)\n assert isemptyfile(abspath)", "response": "Check that the new file is empty"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nenabling simple console logging for this module", "response": "def enable(logger=logger,\n level=logging.INFO,\n format=DETAIL_LOG_FORMAT,\n echo=True):\n \"\"\"Enable simple console logging for this module\"\"\"\n global _handler\n if _handler is None:\n _handler = logging.StreamHandler()\n formatter = logging.Formatter(format)\n _handler.setFormatter(formatter)\n\n level = logging._checkLevel(level)\n levelName = 
logging._levelToName[level]\n\n logger.setLevel(level)\n _handler.setLevel(level)\n\n if _handler not in logger.handlers:\n logger.addHandler(_handler)\n\n if echo:\n logger.log(\n level, 'Logging enabled at level {name}.'.format(name=levelName))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef default_base_dir():\n base_dir = os.environ.get('OS_REFRESH_CONFIG_BASE_DIR')\n if base_dir is None:\n # NOTE(bnemec): Prefer the new location, but still allow the old one.\n if os.path.isdir(OLD_BASE_DIR) and not os.path.isdir(DEFAULT_BASE_DIR):\n logging.warning('Base directory %s is deprecated. The recommended '\n 'base directory is %s',\n OLD_BASE_DIR, DEFAULT_BASE_DIR)\n base_dir = OLD_BASE_DIR\n else:\n base_dir = DEFAULT_BASE_DIR\n return base_dir", "response": "Determine the default base directory path for the base directory of the current user."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nformat code content using Black Black", "response": "def blacken_code(code):\n \"\"\"Format code content using Black\n\n Args:\n code (str): code as string\n\n Returns:\n str\n \"\"\"\n if black is None:\n raise NotImplementedError\n\n major, minor, _ = platform.python_version_tuple()\n pyversion = 'py{major}{minor}'.format(major=major, minor=minor)\n target_versions = [black.TargetVersion[pyversion.upper()]]\n\n line_length = black.DEFAULT_LINE_LENGTH\n string_normalization = True\n\n mode = black.FileMode(\n target_versions=target_versions,\n line_length=line_length,\n string_normalization=string_normalization,\n )\n\n return black.format_file_contents(code, fast=False, mode=mode)"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\ntakes a asn1crypto. csr. 
CertificationRequest object and returns a PEM - encoded CSR.", "response": "def pem_armor_csr(certification_request):\n \"\"\"\n Encodes a CSR into PEM format\n\n :param certification_request:\n An asn1crypto.csr.CertificationRequest object of the CSR to armor.\n Typically this is obtained from CSRBuilder.build().\n\n :return:\n A byte string of the PEM-encoded CSR\n \"\"\"\n\n if not isinstance(certification_request, csr.CertificationRequest):\n raise TypeError(_pretty_message(\n '''\n certification_request must be an instance of\n asn1crypto.csr.CertificationRequest, not %s\n ''',\n _type_name(certification_request)\n ))\n\n return pem.armor(\n 'CERTIFICATE REQUEST',\n certification_request.dump()\n )"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nformat a multi - line string into a pretty - printed string.", "response": "def _pretty_message(string, *params):\n \"\"\"\n Takes a multi-line string and does the following:\n\n - dedents\n - converts newlines with text before and after into a single line\n - strips leading and trailing whitespace\n\n :param string:\n The string to format\n\n :param *params:\n Params to interpolate into the string\n\n :return:\n The formatted string\n \"\"\"\n\n output = textwrap.dedent(string)\n\n # Unwrap lines, taking into account bulleted lists, ordered lists and\n # underlines consisting of = signs\n if output.find('\\n') != -1:\n output = re.sub('(?<=\\\\S)\\n(?=[^ \\n\\t\\\\d\\\\*\\\\-=])', ' ', output)\n\n if params:\n output = output % params\n\n output = output.strip()\n\n return output"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nset the Subject field of the object.", "response": "def subject(self, value):\n \"\"\"\n An asn1crypto.x509.Name object, or a dict with at least the\n following keys:\n\n - country_name\n - state_or_province_name\n - locality_name\n - organization_name\n - common_name\n\n Less common keys include:\n\n - organizational_unit_name\n - 
email_address\n - street_address\n - postal_code\n - business_category\n - incorporation_locality\n - incorporation_state_or_province\n - incorporation_country\n\n Uncommon keys include:\n\n - surname\n - title\n - serial_number\n - name\n - given_name\n - initials\n - generation_qualifier\n - dn_qualifier\n - pseudonym\n - domain_component\n\n All values should be unicode strings\n \"\"\"\n\n is_dict = isinstance(value, dict)\n if not isinstance(value, x509.Name) and not is_dict:\n raise TypeError(_pretty_message(\n '''\n subject must be an instance of asn1crypto.x509.Name or a dict,\n not %s\n ''',\n _type_name(value)\n ))\n\n if is_dict:\n value = x509.Name.build(value)\n\n self._subject = value"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef subject_public_key(self, value):\n\n is_oscrypto = isinstance(value, asymmetric.PublicKey)\n if not isinstance(value, keys.PublicKeyInfo) and not is_oscrypto:\n raise TypeError(_pretty_message(\n '''\n subject_public_key must be an instance of\n asn1crypto.keys.PublicKeyInfo or oscrypto.asymmetric.PublicKey,\n not %s\n ''',\n _type_name(value)\n ))\n\n if is_oscrypto:\n value = value.asn1\n\n self._subject_public_key = value\n\n self._key_identifier = self._subject_public_key.sha1\n self._authority_key_identifier = None", "response": "Set the subject s public key."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef hash_algo(self, value):\n\n if value not in set(['sha1', 'sha256', 'sha512']):\n raise ValueError(_pretty_message(\n '''\n hash_algo must be one of \"sha1\", \"sha256\", \"sha512\", not %s\n ''',\n repr(value)\n ))\n\n self._hash_algo = value", "response": "Sets the hash algorithm of the log entry."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _get_subject_alt(self, name):\n\n if self._subject_alt_name is None:\n return []\n\n output = 
[]\n for general_name in self._subject_alt_name:\n if general_name.name == name:\n output.append(general_name.native)\n return output", "response": "Returns the native value for each value in the subject alt name extension reqiest that is an asn1crypto. x509. GeneralName of the type\n ."} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nsets the subject alternative name of the certificate.", "response": "def _set_subject_alt(self, name, values):\n \"\"\"\n Replaces all existing asn1crypto.x509.GeneralName objects of the choice\n represented by the name parameter with the values\n\n :param name:\n A unicode string of the choice name of the x509.GeneralName object\n\n :param values:\n A list of unicode strings to use as the values for the new\n x509.GeneralName objects\n \"\"\"\n\n if self._subject_alt_name is not None:\n filtered_general_names = []\n for general_name in self._subject_alt_name:\n if general_name.name != name:\n filtered_general_names.append(general_name)\n self._subject_alt_name = x509.GeneralNames(filtered_general_names)\n else:\n self._subject_alt_name = x509.GeneralNames()\n\n if values is not None:\n for value in values:\n new_general_name = x509.GeneralName(name=name, value=value)\n self._subject_alt_name.append(new_general_name)\n\n if len(self._subject_alt_name) == 0:\n self._subject_alt_name = None"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nsetting the value for an extension using a fully constructed Asn1Value object from asn1crypto. x509. ExtensionId object from asn1crypto. x509. ExtensionId object from asn1crypto. x509. ExtensionId.", "response": "def set_extension(self, name, value):\n \"\"\"\n Sets the value for an extension using a fully constructed Asn1Value\n object from asn1crypto. 
Normally this should not be needed, and the\n convenience attributes should be sufficient.\n\n See the definition of asn1crypto.x509.Extension to determine the\n appropriate object type for a given extension. Extensions are marked\n as critical when RFC5280 or RFC6960 indicate so. If an extension is\n validly marked as critical or not (such as certificate policies and\n extended key usage), this class will mark it as non-critical.\n\n :param name:\n A unicode string of an extension id name from\n asn1crypto.x509.ExtensionId\n\n :param value:\n A value object per the specs defined by asn1crypto.x509.Extension\n \"\"\"\n\n extension = x509.Extension({\n 'extn_id': name\n })\n # We use native here to convert OIDs to meaningful names\n name = extension['extn_id'].native\n\n spec = extension.spec('extn_value')\n\n if not isinstance(value, spec) and value is not None:\n raise TypeError(_pretty_message(\n '''\n value must be an instance of %s, not %s\n ''',\n _type_name(spec),\n _type_name(value)\n ))\n\n if name in self._special_extensions:\n setattr(self, '_%s' % name, value)\n else:\n if value is None:\n if name in self._other_extensions:\n del self._other_extensions[name]\n else:\n self._other_extensions[name] = value"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ndetermine if the critical flag for the EV entry for the given EV entry name is True.", "response": "def _determine_critical(self, name):\n \"\"\"\n :return:\n A boolean indicating the correct value of the critical flag for\n an extension, based on information from RFC5280 and RFC 6960. 
The\n correct value is based on the terminology SHOULD or MUST.\n \"\"\"\n\n if name == 'subject_alt_name':\n return len(self._subject) == 0\n\n if name == 'basic_constraints':\n return self.ca is True\n\n return {\n 'subject_directory_attributes': False,\n 'key_usage': True,\n 'issuer_alt_name': False,\n 'name_constraints': True,\n # Based on example EV certificates, non-CA certs have this marked\n # as non-critical, most likely because existing browsers don't\n # seem to support policies or name constraints\n 'certificate_policies': False,\n 'policy_mappings': True,\n 'policy_constraints': True,\n 'extended_key_usage': False,\n 'inhibit_any_policy': True,\n 'subject_information_access': False,\n 'tls_feature': False,\n 'ocsp_no_check': False,\n }.get(name, False)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef build(self, signing_private_key):\n\n is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)\n if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:\n raise TypeError(_pretty_message(\n '''\n signing_private_key must be an instance of\n asn1crypto.keys.PrivateKeyInfo or\n oscrypto.asymmetric.PrivateKey, not %s\n ''',\n _type_name(signing_private_key)\n ))\n\n signature_algo = signing_private_key.algorithm\n if signature_algo == 'ec':\n signature_algo = 'ecdsa'\n\n signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)\n\n def _make_extension(name, value):\n return {\n 'extn_id': name,\n 'critical': self._determine_critical(name),\n 'extn_value': value\n }\n\n extensions = []\n for name in sorted(self._special_extensions):\n value = getattr(self, '_%s' % name)\n if value is not None:\n extensions.append(_make_extension(name, value))\n\n for name in sorted(self._other_extensions.keys()):\n extensions.append(_make_extension(name, self._other_extensions[name]))\n\n attributes = []\n if extensions:\n attributes.append({\n 'type': 
'extension_request',\n 'values': [extensions]\n })\n\n certification_request_info = csr.CertificationRequestInfo({\n 'version': 'v1',\n 'subject': self._subject,\n 'subject_pk_info': self._subject_public_key,\n 'attributes': attributes\n })\n\n if signing_private_key.algorithm == 'rsa':\n sign_func = asymmetric.rsa_pkcs1v15_sign\n elif signing_private_key.algorithm == 'dsa':\n sign_func = asymmetric.dsa_sign\n elif signing_private_key.algorithm == 'ec':\n sign_func = asymmetric.ecdsa_sign\n\n if not is_oscrypto:\n signing_private_key = asymmetric.load_private_key(signing_private_key)\n signature = sign_func(signing_private_key, certification_request_info.dump(), self._hash_algo)\n\n return csr.CertificationRequest({\n 'certification_request_info': certification_request_info,\n 'signature_algorithm': {\n 'algorithm': signature_algorithm_id,\n },\n 'signature': signature\n })", "response": "Builds a new certificate and returns it."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef compute_metrics_cv(self, X, y, **kwargs):\n '''Compute cross-validated metrics.\n\n Trains this model on data X with labels y.\n Returns a list of dict with keys name, scoring_name, value.\n\n Args:\n X (Union[np.array, pd.DataFrame]): data\n y (Union[np.array, pd.DataFrame, pd.Series]): labels\n '''\n\n # compute scores\n results = self.cv_score_mean(X, y)\n return results", "response": "Compute cross - validated metrics.\n Trains this model on data X with labels y and computes scores for each entry in the result set."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef cv_score_mean(self, X, y):\n '''Compute mean score across cross validation folds.\n\n Split data and labels into cross validation folds and fit the model for\n each fold. Then, for each scoring type in scorings, compute the score.\n Finally, average the scores across folds. 
Returns a dictionary mapping\n scoring to score.\n\n Args:\n X (np.array): data\n y (np.array): labels\n scorings (List[str]): scoring types\n '''\n\n X, y = self._format_inputs(X, y)\n\n if self.problem_type.binary_classification:\n kf = StratifiedKFold(\n shuffle=True, random_state=RANDOM_STATE + 3)\n elif self.problem_type.multi_classification:\n self.target_type_transformer.inverse_transform(y)\n transformer = self.target_type_transformer\n kf = StratifiedKFoldMultiClassIndicator(\n transformer, shuffle=True, n_splits=3,\n random_state=RANDOM_STATE + 3)\n elif self.problem_type.regression:\n kf = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)\n else:\n raise NotImplementedError\n\n scoring = {\n scorer_info.name: scorer_info.scorer\n for scorer_info in self.scorers_info\n }\n cv_results = cross_validate(\n self.estimator, X, y,\n scoring=scoring, cv=kf, return_train_score=False)\n\n # post-processing\n results = self._process_cv_results(cv_results)\n return results", "response": "Compute mean score across cross validation folds."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_contrib_features(project_root):\n # TODO Project should require ModuleType\n project = Project(project_root)\n contrib = project._resolve('.features.contrib')\n return _get_contrib_features(contrib)", "response": "Get contributed features for a project at project_root"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget contributed features from within given module", "response": "def _get_contrib_features(module):\n \"\"\"Get contributed features from within given module\n\n Be very careful with untrusted code. The module/package will be\n walked, every submodule will be imported, and all the code therein will be\n executed. 
But why would you be trying to import from an untrusted package\n anyway?\n\n Args:\n contrib (module): module (standalone or package) that contains feature\n definitions\n\n Returns:\n List[Feature]: list of features\n \"\"\"\n\n if isinstance(module, types.ModuleType):\n # any module that has a __path__ attribute is also a package\n if hasattr(module, '__path__'):\n yield from _get_contrib_features_from_package(module)\n else:\n yield _get_contrib_feature_from_module(module)\n else:\n raise ValueError('Input is not a module')"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef quickstart():\n import ballet.templating\n import ballet.util.log\n ballet.util.log.enable(level='INFO',\n format=ballet.util.log.SIMPLE_LOG_FORMAT,\n echo=False)\n ballet.templating.render_project_template()", "response": "Generate a brand - new ballet project"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nupdate an existing ballet project from the upstream template", "response": "def update_project_template(push):\n \"\"\"Update an existing ballet project from the upstream template\"\"\"\n import ballet.update\n import ballet.util.log\n ballet.util.log.enable(level='INFO',\n format=ballet.util.log.SIMPLE_LOG_FORMAT,\n echo=False)\n ballet.update.update_project_template(push=push)"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nstart working on a new feature from a template", "response": "def start_new_feature():\n \"\"\"Start working on a new feature from a template\"\"\"\n import ballet.templating\n import ballet.util.log\n ballet.util.log.enable(level='INFO',\n format=ballet.util.log.SIMPLE_LOG_FORMAT,\n echo=False)\n ballet.templating.start_new_feature()"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef write_tabular(obj, filepath):\n _, fn, ext = splitext2(filepath)\n if ext == '.h5':\n 
_write_tabular_h5(obj, filepath)\n elif ext == '.pkl':\n _write_tabular_pickle(obj, filepath)\n else:\n raise NotImplementedError", "response": "Write tabular object in HDF5 or pickle format"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef read_tabular(filepath):\n _, fn, ext = splitext2(filepath)\n if ext == '.h5':\n return _read_tabular_h5(filepath)\n elif ext == '.pkl':\n return _read_tabular_pickle(filepath)\n else:\n raise NotImplementedError", "response": "Read tabular object in HDF5 or pickle format\n "} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nloads a table from a dictionary containing the keys path and pd_read_kwargs.", "response": "def load_table_from_config(input_dir, config):\n \"\"\"Load table from table config dict\n\n Args:\n input_dir (path-like): directory containing input files\n config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.\n\n Returns:\n pd.DataFrame\n \"\"\"\n path = pathlib.Path(input_dir).joinpath(config['path'])\n kwargs = config['pd_read_kwargs']\n return pd.read_csv(path, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nadds s into filepath before the extension", "response": "def spliceext(filepath, s):\n \"\"\"Add s into filepath before the extension\n\n Args:\n filepath (str, path): file path\n s (str): string to splice\n\n Returns:\n str\n \"\"\"\n root, ext = os.path.splitext(safepath(filepath))\n return root + s + ext"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nreplace any existing file extension with a new one.", "response": "def replaceext(filepath, new_ext):\n \"\"\"Replace any existing file extension with a new one\n\n Example::\n\n >>> replaceext('/foo/bar.txt', 'py')\n '/foo/bar.py'\n >>> replaceext('/foo/bar.txt', '.doc')\n '/foo/bar.doc'\n\n Args:\n filepath (str, path): file path\n 
new_ext (str): new file extension; if a leading dot is not included,\n it will be added.\n\n Returns:\n Tuple[str]\n \"\"\"\n if new_ext and new_ext[0] != '.':\n new_ext = '.' + new_ext\n\n root, ext = os.path.splitext(safepath(filepath))\n return root + new_ext"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef splitext2(filepath):\n root, filename = os.path.split(safepath(filepath))\n filename, ext = os.path.splitext(safepath(filename))\n return root, filename, ext", "response": "Split filepath into root filename ext"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ndetermine if the file both exists and isempty", "response": "def isemptyfile(filepath):\n \"\"\"Determine if the file both exists and isempty\n\n Args:\n filepath (str, path): file path\n\n Returns:\n bool\n \"\"\"\n exists = os.path.exists(safepath(filepath))\n if exists:\n filesize = os.path.getsize(safepath(filepath))\n return filesize == 0\n else:\n return False"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef synctree(src, dst, onexist=None):\n src = pathlib.Path(src).resolve()\n dst = pathlib.Path(dst).resolve()\n\n if not src.is_dir():\n raise ValueError\n\n if dst.exists() and not dst.is_dir():\n raise ValueError\n\n if onexist is None:\n def onexist(): pass\n\n _synctree(src, dst, onexist)", "response": "Recursively sync files at directory src to dst"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef calculate_disc_entropy(X):\n X = asarray2d(X)\n n_samples, _ = X.shape\n _, counts = np.unique(X, axis=0, return_counts=True)\n empirical_p = counts * 1.0 / n_samples\n log_p = np.log(empirical_p)\n entropy = -np.sum(np.multiply(empirical_p, log_p))\n return entropy", "response": "r Calculates the exact Shannon entropy of a discrete dataset."} {"SOURCE": "codesearchnet", "instruction": 
"Implement a Python 3 function for\nestimating the Shannon entropy of a discrete dataset.", "response": "def estimate_cont_entropy(X, epsilon=None):\n \"\"\"Estimate the Shannon entropy of a discrete dataset.\n\n Based off the Kraskov Estimator [1] and Kozachenko [2]\n estimators for a dataset's Shannon entropy.\n\n The function relies on nonparametric methods based on entropy\n estimation from k-nearest neighbors distances as proposed\n in [1] and augmented in [2] for mutual information estimation.\n\n If X's columns logically represent discrete features,\n it is better to use the calculate_disc_entropy function.\n If you are unsure of which to use, estimate_entropy can\n take datasets of mixed discrete and continuous functions.\n\n Args:\n X (array-like): An array-like (np arr, pandas df, etc.) with shape\n (n_samples, n_features) or (n_samples)\n epsilon (array-like): An array with shape (n_samples, 1) that is\n the epsilon used in Kraskov Estimator. Represents the chebyshev\n distance from an element to its k-th nearest neighbor in the full\n dataset.\n\n Returns:\n float: A floating-point number. If epsilon is not provided,\n this will be the Kozacheko Estimator of the dataset's entropy.\n If epsilon is provided, this is a partial estimation of the Kraskov\n entropy estimator. The bias is cancelled out when computing\n mutual information.\n\n References:\n\n .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\n information\". Phys. Rev. E 69, 2004.\n .. [2] L. F. Kozachenko, N. N. Leonenko, \"Sample Estimate of the Entropy\n of a Random Vector:, Probl. 
Peredachi Inf., 23:2 (1987), 9-16\n\n \"\"\"\n X = asarray2d(X)\n n_samples, n_features = X.shape\n if n_samples <= 1:\n return 0\n nn = NearestNeighbors(\n metric='chebyshev',\n n_neighbors=NUM_NEIGHBORS,\n algorithm='kd_tree')\n nn.fit(X)\n if epsilon is None:\n # If epsilon is not provided, revert to the Kozachenko Estimator\n n_neighbors = NUM_NEIGHBORS\n radius = 0\n # While we have non-zero radii, calculate for a larger k\n # Potentially expensive\n while not np.all(radius) and n_neighbors < n_samples:\n distances, _ = nn.kneighbors(\n n_neighbors=n_neighbors, return_distance=True)\n radius = distances[:, -1]\n n_neighbors += 1\n if n_neighbors == n_samples:\n # This case only happens if all samples are the same\n # e.g. this isn't a continuous sample...\n raise ValueError('Should not have discrete column to estimate')\n return -digamma(n_neighbors) + digamma(n_samples) + \\\n n_features * np.mean(np.log(2 * radius))\n else:\n ind = nn.radius_neighbors(\n radius=epsilon.ravel(),\n return_distance=False)\n nx = np.array([i.size for i in ind])\n return - np.mean(digamma(nx + 1)) + digamma(n_samples)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _calculate_epsilon(X):\n disc_mask = _get_discrete_columns(X)\n if np.all(disc_mask):\n # if all discrete columns, there's no point getting epsilon\n return 0\n cont_features = X[:, ~disc_mask]\n nn = NearestNeighbors(metric='chebyshev', n_neighbors=NUM_NEIGHBORS)\n nn.fit(cont_features)\n distances, _ = nn.kneighbors()\n epsilon = np.nextafter(distances[:, -1], 0)\n return asarray2d(epsilon)", "response": "Calculates the epsilon of the Kraskov Estimator."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef estimate_conditional_information(x, y, z):\n xz = np.concatenate((x, z), axis=1)\n yz = np.concatenate((y, z), axis=1)\n xyz = np.concatenate((xz, y), axis=1)\n epsilon = _calculate_epsilon(xyz)\n h_xz = 
estimate_entropy(xz, epsilon)\n h_yz = estimate_entropy(yz, epsilon)\n h_xyz = estimate_entropy(xyz, epsilon)\n h_z = estimate_entropy(z, epsilon)\n return max(0, h_xz + h_yz - h_xyz - h_z)", "response": "Estimate the conditional mutual information of two datasets."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nestimating the mutual information of two datasets.", "response": "def estimate_mutual_information(x, y):\n \"\"\"Estimate the mutual information of two datasets.\n\n Mutual information is a measure of dependence between\n two datasets and is calculated as:\n\n $I(x;y) = H(x) + H(y) - H(x,y)$\n\n Where H(x) is the Shannon entropy of x. For continuous datasets,\n adapts the Kraskov Estimator [1] for mutual information.\n\n Args:\n x (array-like): An array with shape (n_samples, n_features_x)\n y (array-like): An array with shape (n_samples, n_features_y)\n\n Returns:\n float: A floating point number representing the mutual\n information of x and y. This calculation is *exact*\n for entirely discrete datasets and *approximate* if\n there are continuous columns present.\n\n References:\n\n .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\n information\". Phys. Rev. 
E 69, 2004.\n \"\"\"\n xy = np.concatenate((x, y), axis=1)\n epsilon = _calculate_epsilon(xy)\n h_x = estimate_entropy(x, epsilon)\n h_y = estimate_entropy(y, epsilon)\n h_xy = estimate_entropy(xy, epsilon)\n return max(0, h_x + h_y - h_xy)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_diff_endpoints_from_commit_range(repo, commit_range):\n if not commit_range:\n raise ValueError('commit_range cannot be empty')\n\n result = re_find(COMMIT_RANGE_REGEX, commit_range)\n if not result:\n raise ValueError(\n 'Expected diff str of the form \\'a..b\\' or \\'a...b\\' (got {})'\n .format(commit_range))\n a, b = result['a'], result['b']\n a, b = repo.rev_parse(a), repo.rev_parse(b)\n if result['thirddot']:\n a = one_or_raise(repo.merge_base(a, b))\n return a, b", "response": "Get the endpoints of a diff given a commit range."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef set_config_variables(repo, variables):\n with repo.config_writer() as writer:\n for k, value in variables.items():\n section, option = k.split('.')\n writer.set_value(section, option, value)\n writer.release()", "response": "Set config variables for the current node"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\ncollect and validate all new features", "response": "def validate(self):\n \"\"\"Collect and validate all new features\"\"\"\n\n changes = self.change_collector.collect_changes()\n\n features = []\n imported_okay = True\n for importer, modname, modpath in changes.new_feature_info:\n try:\n mod = importer()\n features.extend(_get_contrib_features(mod))\n except (ImportError, SyntaxError):\n logger.info(\n 'Failed to import module at {}'\n .format(modpath))\n logger.exception('Exception details: ')\n imported_okay = False\n\n if not imported_okay:\n return False\n\n # if no features were added at all, reject\n if not features:\n 
logger.info('Failed to collect any new features.')\n return False\n\n return all(\n validate_feature_api(feature, self.X, self.y, subsample=False)\n for feature in features\n )"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nload config at exact path", "response": "def load_config_at_path(path):\n \"\"\"Load config at exact path\n\n Args:\n path (path-like): path to config file\n\n Returns:\n dict: config dict\n \"\"\"\n if path.exists() and path.is_file():\n with path.open('r') as f:\n return yaml.load(f, Loader=yaml.SafeLoader)\n else:\n raise ConfigurationError(\"Couldn't find ballet.yml config file.\")"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nget a configuration option following a path through the configdict", "response": "def config_get(config, *path, default=None):\n \"\"\"Get a configuration option following a path through the config\n\n Example usage:\n\n >>> config_get(config,\n 'problem', 'problem_type_details', 'scorer',\n default='accuracy')\n\n Args:\n config (dict): config dict\n *path (list[str]): List of config sections and options to follow.\n default (default=None): A default value to return in the case that\n the option does not exist.\n \"\"\"\n o = object()\n result = get_in(config, path, default=o)\n if result is not o:\n return result\n else:\n return default"} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns a function to get configuration options for a specific project", "response": "def make_config_get(conf_path):\n \"\"\"Return a function to get configuration options for a specific project\n\n Args:\n conf_path (path-like): path to project's conf file (i.e. 
foo.conf\n module)\n \"\"\"\n project_root = _get_project_root_from_conf_path(conf_path)\n config = load_config_in_dir(project_root)\n return partial(config_get, config)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef relative_to_contrib(diff, project):\n path = pathlib.Path(diff.b_path)\n contrib_path = project.contrib_module_path\n return path.relative_to(contrib_path)", "response": "Compute relative path of changed file to contrib dir"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef pr_num(self):\n result = get_pr_num(repo=self.repo)\n if result is None:\n result = get_travis_pr_num()\n return result", "response": "Return the PR number or None if not on a PR"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nreturn whether the project is on master branch", "response": "def branch(self):\n \"\"\"Return whether the project is on master branch\"\"\"\n result = get_branch(repo=self.repo)\n if result is None:\n result = get_travis_branch()\n return result"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nreturning the project path", "response": "def path(self):\n \"\"\"Return the project path (aka project root)\n\n If ``package.__file__`` is ``/foo/foo/__init__.py``, then project.path\n should be ``/foo``.\n \"\"\"\n return pathlib.Path(self.package.__file__).resolve().parent.parent"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\ncast to 2d array", "response": "def asarray2d(a):\n \"\"\"Cast to 2d array\"\"\"\n arr = np.asarray(a)\n if arr.ndim == 1:\n arr = arr.reshape(-1, 1)\n return arr"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef get_arr_desc(arr):\n type_ = type(arr).__name__ # see also __qualname__\n shape = getattr(arr, 'shape', None)\n if shape is not None:\n desc = '{type_} {shape}'\n else:\n desc = '{type_} 
'\n return desc.format(type_=type_, shape=shape)", "response": "Get array description in the form ' "} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef indent(text, n=4):\n _indent = ' ' * n\n return '\\n'.join(_indent + line for line in text.split('\\n'))", "response": "Indent each line of text by n spaces"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nchecking if obj has any NaNs", "response": "def has_nans(obj):\n \"\"\"Check if obj has any NaNs\n\n Compatible with different behavior of np.isnan, which sometimes applies\n over all axes (py35, py35) and sometimes does not (py34).\n \"\"\"\n nans = np.isnan(obj)\n while np.ndim(nans):\n nans = np.any(nans)\n return bool(nans)"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef needs_path(f):\n @wraps(f)\n def wrapped(pathlike, *args, **kwargs):\n path = pathlib.Path(pathlike)\n return f(path, *args, **kwargs)\n return wrapped", "response": "Wraps a function that accepts path - like to give it a pathlib. 
Path"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef import_module_at_path(modname, modpath):\n # TODO just keep navigating up in the source tree until an __init__.py is\n # not found?\n\n modpath = pathlib.Path(modpath).resolve()\n\n if modpath.name == '__init__.py':\n # TODO improve debugging output with recommend change\n raise ValueError('Don\\'t provide the __init__.py!')\n\n def is_package(modpath):\n return modpath.suffix != '.py'\n\n def has_init(dir):\n return dir.joinpath('__init__.py').is_file()\n\n def has_package_structure(modname, modpath):\n modparts = modname.split('.')\n n = len(modparts)\n dir = modpath\n if not is_package(modpath):\n n = n - 1\n dir = dir.parent\n while n > 0:\n if not has_init(dir):\n return False\n dir = dir.parent\n n = n - 1\n\n return True\n\n if not has_package_structure(modname, modpath):\n raise ImportError('Module does not have valid package structure.')\n\n parentpath = str(pathlib.Path(modpath).parent)\n finder = pkgutil.get_importer(parentpath)\n loader = finder.find_module(modname)\n if loader is None:\n raise ImportError(\n 'Failed to find loader for module {} within dir {}'\n .format(modname, parentpath))\n mod = loader.load_module(modname)\n\n # TODO figure out what to do about this\n assert mod.__name__ == modname\n\n return mod", "response": "Import module from path that may not be on system path."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nconvert relative path to a module name.", "response": "def relpath_to_modname(relpath):\n \"\"\"Convert relative path to module name\n\n Within a project, a path to the source file is uniquely identified with a\n module name. 
Relative paths of the form 'foo/bar' are *not* converted to\n module names 'foo.bar', because (1) they identify directories, not regular\n files, and (2) already 'foo/bar/__init__.py' would claim that conversion.\n\n Args:\n relpath (str): Relative path from some location on sys.path\n\n Example:\n >>> relpath_to_modname('ballet/util/_util.py')\n 'ballet.util._util'\n \"\"\"\n # don't try to resolve!\n p = pathlib.Path(relpath)\n\n if p.name == '__init__.py':\n p = p.parent\n elif p.suffix == '.py':\n p = p.with_suffix('')\n else:\n msg = 'Cannot convert a non-python file to a modname'\n msg_detail = 'The relpath given is: {}'.format(relpath)\n logger.error(msg + '\\n' + msg_detail)\n raise ValueError(msg)\n\n return '.'.join(p.parts)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nconvert a module name to relative path.", "response": "def modname_to_relpath(modname, project_root=None, add_init=True):\n \"\"\"Convert module name to relative path.\n\n The project root is usually needed to detect if the module is a package, in\n which case the relevant file is the `__init__.py` within the subdirectory.\n\n Example:\n >>> modname_to_relpath('foo.features')\n 'foo/features.py'\n >>> modname_to_relpath('foo.features',\n project_root='/path/to/project')\n 'foo/features/__init__.py'\n\n Args:\n modname (str): Module name, e.g. `os.path`\n project_root (str): Path to project root\n add_init (bool): Whether to add `__init__.py` to the path of modules\n that are packages. Defaults to True\n\n Returns:\n str\n \"\"\"\n parts = modname.split('.')\n relpath = pathlib.Path(*parts)\n\n # is the module a package? 
if so, the relpath identifies a directory\n # it is easier to check for whether a file is a directory than to try to\n # import the module dynamically and see whether it is a package\n if project_root is not None:\n relpath_resolved = pathlib.Path(project_root).joinpath(relpath)\n else:\n relpath_resolved = relpath\n\n if relpath_resolved.is_dir():\n if add_init:\n relpath = relpath.joinpath('__init__.py')\n else:\n relpath = str(relpath) + '.py'\n\n return str(relpath)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef check(self, feature):\n input = feature.input\n is_str = isa(str)\n is_nested_str = all_fn(\n iterable, lambda x: all(is_str, x))\n assert is_str(input) or is_nested_str(input)", "response": "Check that the feature s input is a str or Iterable [ str ]"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef check(self, feature):\n assert hasattr(feature.transformer, 'fit')\n assert hasattr(feature.transformer, 'transform')\n assert hasattr(feature.transformer, 'fit_transform')", "response": "Check that the feature has a fit transform and fit_tranform interface"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef check(self, feature):\n mapper = feature.as_dataframe_mapper()\n mapper.fit(self.X, y=self.y)", "response": "Check that fit can be called on reference data"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nchecks that fit_transform can be called on reference data", "response": "def check(self, feature):\n \"\"\"Check that fit_transform can be called on reference data\"\"\"\n mapper = feature.as_dataframe_mapper()\n mapper.fit_transform(self.X, y=self.y)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nchecking that the dimensions of the transformed data are correct.", "response": "def 
check(self, feature):\n \"\"\"Check that the dimensions of the transformed data are correct\n\n For input X, an n x p array, a n x q array should be produced,\n where q is the number of features produced by the logical feature.\n \"\"\"\n mapper = feature.as_dataframe_mapper()\n X = mapper.fit_transform(self.X, y=self.y)\n assert self.X.shape[0] == X.shape[0]"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef check(self, feature):\n try:\n buf = io.BytesIO()\n pickle.dump(feature, buf, protocol=pickle.HIGHEST_PROTOCOL)\n buf.seek(0)\n new_feature = pickle.load(buf)\n assert new_feature is not None\n assert isinstance(new_feature, Feature)\n finally:\n buf.close()", "response": "Check that the feature can be pickled by pickler"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef check(self, feature):\n mapper = feature.as_dataframe_mapper()\n X = mapper.fit_transform(self.X, y=self.y)\n assert not np.any(np.isnan(X))", "response": "Check that the output of the transformer has no missing values"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef make_multi_lagger(lags, groupby_kwargs=None):\n laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]\n feature_union = FeatureUnion([\n (repr(lagger), lagger) for lagger in laggers\n ])\n return feature_union", "response": "Return a union of transformers that apply different lags\n "} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nstarting a new feature within a ballet project", "response": "def start_new_feature(**cc_kwargs):\n \"\"\"Start a new feature within a ballet project\n\n Renders the feature template into a temporary directory, then copies the\n feature files into the proper path within the contrib directory.\n\n Args:\n **cc_kwargs: options for the cookiecutter template\n\n Raises:\n ballet.exc.BalletError: the new 
feature has the same name as an\n existing one\n \"\"\"\n project = Project.from_path(pathlib.Path.cwd().resolve())\n contrib_dir = project.get('contrib', 'module_path')\n\n with tempfile.TemporaryDirectory() as tempdir:\n # render feature template\n output_dir = tempdir\n cc_kwargs['output_dir'] = output_dir\n rendered_dir = render_feature_template(**cc_kwargs)\n\n # copy into contrib dir\n src = rendered_dir\n dst = contrib_dir\n synctree(src, dst, onexist=_fail_if_feature_exists)\n\n logger.info('Start new feature successful.')"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets the proposed feature from the project.", "response": "def get_proposed_feature(project):\n \"\"\"Get the proposed feature\n\n The path of the proposed feature is determined by diffing the project\n against a comparison branch, such as master. The feature is then imported\n from that path and returned.\n\n Args:\n project (ballet.project.Project): project info\n\n Raises:\n ballet.exc.BalletError: more than one feature collected\n \"\"\"\n change_collector = ChangeCollector(project)\n collected_changes = change_collector.collect_changes()\n try:\n new_feature_info = one_or_raise(collected_changes.new_feature_info)\n importer, _, _ = new_feature_info\n except ValueError:\n raise BalletError('Too many features collected')\n module = importer()\n feature = _get_contrib_feature_from_module(module)\n return feature"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef get_accepted_features(features, proposed_feature):\n def eq(feature):\n \"\"\"Features are equal if they have the same source\n\n At least in this implementation...\n \"\"\"\n return feature.source == proposed_feature.source\n\n # deselect features that match the proposed feature\n result = lfilter(complement(eq), features)\n\n if len(features) - len(result) == 1:\n return result\n elif len(result) == len(features):\n raise BalletError(\n 
'Did not find match for proposed feature within \\'contrib\\'')\n else:\n raise BalletError(\n 'Unexpected condition (n_features={}, n_result={})'\n .format(len(features), len(result)))", "response": "Returns a list of all features that have not been accepted by the proposed feature."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef collect_changes(self):\n\n file_diffs = self._collect_file_diffs()\n candidate_feature_diffs, valid_init_diffs, inadmissible_diffs = \\\n self._categorize_file_diffs(file_diffs)\n new_feature_info = self._collect_feature_info(candidate_feature_diffs)\n\n return CollectedChanges(\n file_diffs, candidate_feature_diffs, valid_init_diffs,\n inadmissible_diffs, new_feature_info)", "response": "Collect file and feature changes for a specific branch of the current pull request."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\npartitions file changes into admissible and inadmissible changes", "response": "def _categorize_file_diffs(self, file_diffs):\n \"\"\"Partition file changes into admissible and inadmissible changes\"\"\"\n # TODO move this into a new validator\n candidate_feature_diffs = []\n valid_init_diffs = []\n inadmissible_files = []\n\n for diff in file_diffs:\n valid, failures = check_from_class(\n ProjectStructureCheck, diff, self.project)\n if valid:\n if pathlib.Path(diff.b_path).parts[-1] != '__init__.py':\n candidate_feature_diffs.append(diff)\n logger.debug(\n 'Categorized {file} as CANDIDATE FEATURE MODULE'\n .format(file=diff.b_path))\n else:\n valid_init_diffs.append(diff)\n logger.debug(\n 'Categorized {file} as VALID INIT MODULE'\n .format(file=diff.b_path))\n else:\n inadmissible_files.append(diff)\n logger.debug(\n 'Categorized {file} as INADMISSIBLE; '\n 'failures were {failures}'\n .format(file=diff.b_path, failures=failures))\n\n logger.info(\n 'Admitted {} candidate feature{} '\n 'and {} __init__ module{} 
'\n 'and rejected {} file{}'\n .format(len(candidate_feature_diffs),\n make_plural_suffix(candidate_feature_diffs),\n len(valid_init_diffs),\n make_plural_suffix(valid_init_diffs),\n len(inadmissible_files),\n make_plural_suffix(inadmissible_files)))\n\n return candidate_feature_diffs, valid_init_diffs, inadmissible_files"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _collect_feature_info(self, candidate_feature_diffs):\n project_root = self.project.path\n for diff in candidate_feature_diffs:\n path = diff.b_path\n modname = relpath_to_modname(path)\n modpath = project_root.joinpath(path)\n importer = partial(import_module_at_path, modname, modpath)\n yield importer, modname, modpath", "response": "Collect feature info from the candidate_feature_diffs list."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngetting current branch per Travis environment variables", "response": "def get_travis_branch():\n \"\"\"Get current branch per Travis environment variables\n\n If travis is building a PR, then TRAVIS_PULL_REQUEST is truthy and the\n name of the branch corresponding to the PR is stored in the\n TRAVIS_PULL_REQUEST_BRANCH environment variable. 
Else, the name of the\n branch is stored in the TRAVIS_BRANCH environment variable.\n\n See also: \n \"\"\" # noqa E501\n try:\n travis_pull_request = get_travis_env_or_fail('TRAVIS_PULL_REQUEST')\n if truthy(travis_pull_request):\n travis_pull_request_branch = get_travis_env_or_fail(\n 'TRAVIS_PULL_REQUEST_BRANCH')\n return travis_pull_request_branch\n else:\n travis_branch = get_travis_env_or_fail('TRAVIS_BRANCH')\n return travis_branch\n except UnexpectedTravisEnvironmentError:\n return None"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef make_mapper(features):\n if not features:\n features = Feature(input=[], transformer=NullTransformer())\n if not iterable(features):\n features = (features, )\n return DataFrameMapper(\n [t.as_input_transformer_tuple() for t in features],\n input_df=True)", "response": "Make a DataFrameMapper from a feature or list of features"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ngenerating names for estimators.", "response": "def _name_estimators(estimators):\n \"\"\"Generate names for estimators.\n\n Adapted from sklearn.pipeline._name_estimators\n \"\"\"\n\n def get_name(estimator):\n if isinstance(estimator, DelegatingRobustTransformer):\n return get_name(estimator._transformer)\n\n return type(estimator).__name__.lower()\n\n names = list(map(get_name, estimators))\n counter = dict(Counter(names))\n counter = select_values(lambda x: x > 1, counter)\n\n for i in reversed(range(len(estimators))):\n name = names[i]\n if name in counter:\n names[i] += \"-%d\" % counter[name]\n counter[name] -= 1\n\n return list(zip(names, estimators))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\npushing default branch and project template branch to remote", "response": "def _push(project):\n \"\"\"Push default branch and project template branch to remote\n\n With default config (i.e. 
remote and branch names), equivalent to::\n\n $ git push origin master:master project-template:project-template\n\n Raises:\n ballet.exc.BalletError: Push failed in some way\n \"\"\"\n repo = project.repo\n remote_name = project.get('project', 'remote')\n remote = repo.remote(remote_name)\n result = _call_remote_push(remote)\n failures = lfilter(complement(did_git_push_succeed), result)\n if failures:\n for push_info in failures:\n logger.error(\n 'Failed to push ref {from_ref} to {to_ref}'\n .format(from_ref=push_info.local_ref.name,\n to_ref=push_info.remote_ref.name))\n raise BalletError('Push failed')"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nbuilding features and target trees for the current log entry.", "response": "def build(X_df=None, y_df=None):\n \"\"\"Build features and target\n\n Args:\n X_df (DataFrame): raw variables\n y_df (DataFrame): raw target\n\n Returns:\n dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y\n \"\"\"\n if X_df is None:\n X_df, _ = load_data()\n if y_df is None:\n _, y_df = load_data()\n\n features = get_contrib_features()\n mapper_X = ballet.feature.make_mapper(features)\n X = mapper_X.fit_transform(X_df)\n\n encoder_y = get_target_encoder()\n y = encoder_y.fit_transform(y_df)\n\n return {\n 'X_df': X_df,\n 'features': features,\n 'mapper_X': mapper_X,\n 'X': X,\n 'y_df': y_df,\n 'encoder_y': encoder_y,\n 'y': y,\n }"} {"SOURCE": "codesearchnet", "instruction": "Implement a function in Python 3 to\nengineer features and targets", "response": "def main(input_dir, output_dir):\n \"\"\"Engineer features\"\"\"\n\n import ballet.util.log\n ballet.util.log.enable(logger=logger, level='INFO', echo=False)\n ballet.util.log.enable(logger=ballet.util.log.logger, level='INFO',\n echo=False)\n\n X_df, y_df = load_data(input_dir=input_dir)\n out = build()\n\n mapper_X = out['mapper_X']\n encoder_y = out['encoder_y']\n\n X_ft = mapper_X.transform(X_df)\n y_ft = encoder_y.transform(y_df)\n\n 
save_features(X_ft, output_dir)\n save_targets(y_ft, output_dir)"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef load_data(input_dir=None):\n if input_dir is not None:\n tables = conf.get('tables')\n\n entities_table_name = conf.get('data', 'entities_table_name')\n entities_config = some(where(tables, name=entities_table_name))\n X = load_table_from_config(input_dir, entities_config)\n\n targets_table_name = conf.get('data', 'targets_table_name')\n targets_config = some(where(tables, name=targets_table_name))\n y = load_table_from_config(input_dir, targets_config)\n else:\n raise NotImplementedError\n\n return X, y", "response": "Load data from a directory."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nwrites the CSV header row with column names.", "response": "def _write_header(self, epoch_data: EpochData) -> None:\n \"\"\"\n Write CSV header row with column names.\n\n Column names are inferred from the ``epoch_data`` and ``self.variables`` (if specified).\n Variables and streams expected later on are stored in ``self._variables`` and ``self._streams`` respectively.\n\n :param epoch_data: epoch data to be logged\n \"\"\"\n self._variables = self._variables or list(next(iter(epoch_data.values())).keys())\n self._streams = epoch_data.keys()\n\n header = ['\"epoch_id\"']\n for stream_name in self._streams:\n header += [stream_name + '_' + var for var in self._variables]\n with open(self._file_path, 'a') as file:\n file.write(self._delimiter.join(header) + '\\n')\n self._header_written = True"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _write_row(self, epoch_id: int, epoch_data: EpochData) -> None:\n\n # list of values to be written\n values = [epoch_id]\n\n for stream_name in self._streams:\n for variable_name in self._variables:\n column_name = stream_name+'_'+variable_name\n try:\n value = 
epoch_data[stream_name][variable_name]\n except KeyError as ex:\n err_message = '`{}` not found in epoch data.'.format(column_name)\n if self._on_missing_variable == 'error':\n raise KeyError(err_message) from ex\n elif self._on_missing_variable == 'warn':\n logging.warning(err_message)\n values.append(self._default_value)\n continue\n\n if isinstance(value, dict) and 'mean' in value:\n value = value['mean']\n elif isinstance(value, dict) and 'nanmean' in value:\n value = value['nanmean']\n\n if np.isscalar(value):\n values.append(value)\n else:\n err_message = 'Variable `{}` value is not scalar.'.format(variable_name)\n if self._on_unknown_type == 'error':\n raise TypeError(err_message)\n elif self._on_unknown_type == 'warn':\n logging.warning(err_message)\n values.append(self._default_value)\n\n # write the row\n with open(self._file_path, 'a') as file:\n row = self._delimiter.join([str(value) for value in values])\n file.write(row + '\\n')", "response": "Write a single epoch result row to the CSV file."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:\n logging.debug('Saving epoch %d data to \"%s\"', epoch_id, self._file_path)\n if not self._header_written:\n self._write_header(epoch_data=epoch_data)\n self._write_row(epoch_id=epoch_id, epoch_data=epoch_data)", "response": "Write a new row to the CSV file with the given epoch data."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef get_random_name(sep: str='-'):\n r = random.SystemRandom()\n return '{}{}{}'.format(r.choice(_left), sep, r.choice(_right))", "response": "Generate random docker - like name with the given separator."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _check_train_time(self) -> None:\n if self._minutes is not None and (datetime.now() - 
self._training_start).total_seconds()/60 > self._minutes:\n raise TrainingTerminated('Training terminated after more than {} minutes'.format(self._minutes))", "response": "Stop the training if the training time exceeded self. _minutes."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncall by the training thread when a batch is complete.", "response": "def after_batch(self, stream_name: str, batch_data: Batch) -> None:\n \"\"\"\n If ``stream_name`` equals to :py:attr:`cxflow.constants.TRAIN_STREAM`,\n increase the iterations counter and possibly stop the training; additionally, call :py:meth:`_check_train_time`.\n\n :param stream_name: stream name\n :param batch_data: ignored\n :raise TrainingTerminated: if the number of iterations reaches ``self._iters``\n \"\"\"\n self._check_train_time()\n if self._iters is not None and stream_name == self._train_stream_name:\n self._iters_done += 1\n if self._iters_done >= self._iters:\n raise TrainingTerminated('Training terminated after iteration {}'.format(self._iters_done))"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:\n self._check_train_time()\n if self._epochs is not None and epoch_id >= self._epochs:\n logging.info('EpochStopperHook triggered')\n raise TrainingTerminated('Training terminated after epoch {}'.format(epoch_id))", "response": "Stop training if the epoch_id reaches self. 
_epochs."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef sanitize_url(url: str) -> str:\n\n for part in reversed(url.split('/')):\n filename = re.sub(r'[^a-zA-Z0-9_.\\-]', '', part)\n if len(filename) > 0:\n break\n else:\n raise ValueError('Could not create reasonable name for file from url %s', url)\n\n return filename", "response": "Sanitize the given url so that it can be used as a valid filename."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef maybe_download_and_extract(data_root: str, url: str) -> None:\n\n # make sure data_root exists\n os.makedirs(data_root, exist_ok=True)\n\n # create sanitized filename from url\n filename = sanitize_url(url)\n\n # check whether the archive already exists\n filepath = os.path.join(data_root, filename)\n if os.path.exists(filepath):\n logging.info('\\t`%s` already exists; skipping', filepath)\n return\n\n # download with progressbar\n try:\n logging.info('\\tdownloading %s', filepath)\n req = requests.get(url, stream=True)\n req.raise_for_status()\n except requests.exceptions.RequestException as ex:\n logging.error('File `%s` could not be downloaded, %s', filepath, ex)\n return\n\n expected_size = int(req.headers.get('content-length'))\n chunk_size = 1024\n with open(filepath, 'wb') as f_out,\\\n click.progressbar(req.iter_content(chunk_size=chunk_size), length=expected_size/chunk_size) as bar:\n for chunk in bar:\n if chunk:\n f_out.write(chunk)\n f_out.flush()\n\n # extract\n try:\n shutil.unpack_archive(filepath, data_root)\n except (shutil.ReadError, ValueError):\n logging.info('File `%s` could not be extracted by `shutil.unpack_archive`. Please process it manually.',\n filepath)", "response": "Download the specified file to data_root and try to unpack it with shutil. 
unpack_archive."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncheck whether the given aggregation is present in NumPy.", "response": "def _raise_check_aggregation(aggregation: str):\n \"\"\"\n Check whether the given aggregation is present in NumPy or it is one of EXTRA_AGGREGATIONS.\n\n :param aggregation: the aggregation name\n :raise ValueError: if the specified aggregation is not supported or found in NumPy\n \"\"\"\n if aggregation not in ComputeStats.EXTRA_AGGREGATIONS and not hasattr(np, aggregation):\n raise ValueError('Aggregation `{}` is not a NumPy function or a member '\n 'of EXTRA_AGGREGATIONS.'.format(aggregation))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _compute_aggregation(aggregation: str, data: Iterable[Any]):\n ComputeStats._raise_check_aggregation(aggregation)\n if aggregation == 'nanfraction':\n return np.sum(np.isnan(data)) / len(data)\n if aggregation == 'nancount':\n return int(np.sum(np.isnan(data)))\n return getattr(np, aggregation)(data)", "response": "Compute the specified aggregation on the given data."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _save_stats(self, epoch_data: EpochData) -> None:\n\n for stream_name in epoch_data.keys():\n for variable, aggregations in self._variable_aggregations.items():\n # variables are already checked in the AccumulatingHook; hence, we do not check them here\n epoch_data[stream_name][variable] = OrderedDict(\n {aggr: ComputeStats._compute_aggregation(aggr, self._accumulator[stream_name][variable])\n for aggr in aggregations})", "response": "Save the aggregate statistics for each variable in the given epoch data."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef after_epoch(self, epoch_data: EpochData, **kwargs) -> None:\n self._save_stats(epoch_data)\n 
super().after_epoch(epoch_data=epoch_data, **kwargs)", "response": "Compute the specified aggregations and save them to the given epoch data."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef save(self) -> None:\n if self._output_dir is None:\n raise ValueError('Can not save TrainingTrace without output dir.')\n yaml_to_file(self._trace, self._output_dir, CXF_TRACE_FILE)", "response": "Saves the training trace to the output directory."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef from_file(filepath: str):\n trace = TrainingTrace()\n trace._trace = load_config(filepath)\n return trace", "response": "Load training trace from the given filepath."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\ncheck termination conditions. :param epoch_id: number of the processed epoch :param epoch_data: epoch data to be checked :raise KeyError: if the stream of variable was not found in ``epoch_data`` :raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation :raise ValueError: if the specified number of epochs exceeded :raise TrainingTerminated: if the monitor variable is above the required level", "response": "def after_epoch(self, epoch_id: int, epoch_data: EpochData):\n \"\"\"\n Check termination conditions.\n\n :param epoch_id: number of the processed epoch\n :param epoch_data: epoch data to be checked\n :raise KeyError: if the stream of variable was not found in ``epoch_data``\n :raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation\n :raise ValueError: if the specified number of epochs exceeded\n :raise TrainingTerminated: if the monitor variable is above the required level\n \"\"\"\n\n if self._stream not in epoch_data:\n raise KeyError('The hook could not determine whether the threshold was exceeded as the stream `{}`'\n 'was not found in the epoch 
data'.format(self._stream))\n\n if self._variable not in epoch_data[self._stream]:\n raise KeyError('The hook could not determine whether the threshold was exceeded as the variable `{}`'\n 'was not found in the epoch data stream `{}`'.format(self._variable, self._stream))\n\n value = epoch_data[self._stream][self._variable]\n if isinstance(value, dict) and 'mean' in value:\n value = value['mean']\n\n if not np.isscalar(value):\n raise TypeError('The value to be checked has to be either a scalar or a dict with `mean` key. '\n 'Got `{}` instead.'.format(type(value).__name__))\n\n if value > self._required_min_value:\n raise TrainingTerminated('{} {} level matched (current {} is greater than required {}).'\n .format(self._stream, self._variable, value, self._required_min_value))\n elif epoch_id >= self._max_epoch:\n raise ValueError('{} {} was only {} in epoch {}, but {} was required. Training failed.'\n .format(self._stream, self._variable, value, epoch_id, self._required_min_value))"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef train(config_path: str, cl_arguments: Iterable[str], output_root: str) -> None:\n config = None\n\n try:\n config_path = find_config(config_path)\n config = load_config(config_file=config_path, additional_args=cl_arguments)\n validate_config(config)\n logging.debug('\\tLoaded config: %s', config)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Loading config failed', ex)\n\n run(config=config, output_root=output_root)", "response": "Load config and start the training."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nevaluates the given model on the specified data stream.", "response": "def evaluate(model_path: str, stream_name: str, config_path: Optional[str], cl_arguments: Iterable[str],\n output_root: str) -> None:\n \"\"\"\n Evaluate the given model on the specified data stream.\n\n Configuration is updated by the respective 
predict.stream_name section, in particular:\n - hooks section is entirely replaced\n - model and dataset sections are updated\n\n :param model_path: path to the model to be evaluated\n :param stream_name: data stream name to be evaluated\n :param config_path: path to the config to be used, if not specified infer the path from ``model_path``\n :param cl_arguments: additional command line arguments which will update the configuration\n :param output_root: output root in which the training directory will be created\n \"\"\"\n config = None\n\n try:\n model_dir = path.dirname(model_path) if not path.isdir(model_path) else model_path\n config_path = find_config(model_dir if config_path is None else config_path)\n config = load_config(config_file=config_path, additional_args=cl_arguments)\n\n if stream_name == CXF_PREDICT_STREAM and stream_name in config: # old style ``cxflow predict ...``\n logging.warning('Old style ``predict`` configuration section is deprecated and will not be supported, '\n 'use ``eval.predict`` section instead.')\n config['eval'] = {'predict': config['predict']}\n\n if 'eval' in config and stream_name in config['eval']:\n update_section = config['eval'][stream_name]\n for subsection in ['dataset', 'model', 'main_loop']:\n if subsection in update_section:\n config[subsection].update(update_section[subsection])\n if 'hooks' in update_section:\n config['hooks'] = update_section['hooks']\n else:\n logging.warning('Config does not contain `eval.%s.hooks` section. 
'\n 'No hook will be employed during the evaluation.', stream_name)\n config['hooks'] = []\n\n validate_config(config)\n\n logging.debug('\\tLoaded config: %s', config)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Loading config failed', ex)\n\n run(config=config, output_root=output_root, restore_from=model_path, eval=stream_name)"} {"SOURCE": "codesearchnet", "instruction": "How would you code a function in Python 3 to\nrun prediction from the specified config file.", "response": "def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:\n \"\"\"\n Run prediction from the specified config path.\n\n If the config contains a ``predict`` section:\n - override hooks with ``predict.hooks`` if present\n - update dataset, model and main loop sections if the respective sections are present\n\n :param config_path: path to the config file or the directory in which it is stored\n :param restore_from: backend-specific path to the already trained model to be restored from.\n If ``None`` is passed, it is inferred from the configuration file location as the directory\n it is located in.\n :param cl_arguments: additional command line arguments which will update the configuration\n :param output_root: output root in which the training directory will be created\n \"\"\"\n config = None\n\n try:\n config_path = find_config(config_path)\n restore_from = restore_from or path.dirname(config_path)\n config = load_config(config_file=config_path, additional_args=cl_arguments)\n\n if 'predict' in config:\n for section in ['dataset', 'model', 'main_loop']:\n if section in config['predict']:\n config[section].update(config['predict'][section])\n if 'hooks' in config['predict']:\n config['hooks'] = config['predict']['hooks']\n else:\n logging.warning('Config does not contain `predict.hooks` section. 
'\n 'No hook will be employed during the prediction.')\n config['hooks'] = []\n\n validate_config(config)\n\n logging.debug('\\tLoaded config: %s', config)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Loading config failed', ex)\n\n run(config=config, output_root=output_root, restore_from=restore_from, eval='predict')"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _create_epoch_data(self, streams: Optional[Iterable[str]]=None) -> EpochData:\n if streams is None:\n streams = [self._train_stream_name] + self._extra_streams\n return OrderedDict([(stream_name, OrderedDict()) for stream_name in streams])", "response": "Create empty epoch data double dict."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nchecking for unused and missing sources.", "response": "def _check_sources(self, batch: Dict[str, object]) -> None:\n \"\"\"\n Check for unused and missing sources.\n\n :param batch: batch to be checked\n :raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error``\n \"\"\"\n unused_sources = [source for source in batch.keys() if source not in self._model.input_names]\n missing_sources = [source for source in self._model.input_names if source not in batch.keys()]\n # check stream sources\n if unused_sources:\n if self._on_unused_sources == 'warn' and not self._extra_sources_warned:\n logging.warning('Some sources provided by the stream do not match model placeholders. Set '\n '`main_loop.on_unused_sources` to `ignore` in order to suppress this warning. '\n 'Extra sources: %s', unused_sources)\n self._extra_sources_warned = True\n elif self._on_unused_sources == 'error':\n raise ValueError('Some sources provided by the stream do not match model placeholders. 
Set'\n '`main_loop.on_unused_sources` to `warn` in order to suppress this error.\\n'\n 'Extra sources: {}'.format(unused_sources))\n\n if missing_sources:\n raise ValueError('Stream does not provide all required sources. Missing sources: {}'\n .format(missing_sources))"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\niterates through the given stream and evaluate/train the model with the received batches. Calls :py:meth:`cxflow.hooks.AbstractHook.after_batch` events. :param stream: stream to iterate :param train: if set to ``True``, the model will be trained :raise ValueError: in case of empty batch when ``on_empty_batch`` is set to ``error`` :raise ValueError: in case of empty stream when ``on_empty_stream`` is set to ``error`` :raise ValueError: in case of two batch variables having different lengths", "response": "def _run_epoch(self, stream: StreamWrapper, train: bool) -> None:\n \"\"\"\n Iterate through the given stream and evaluate/train the model with the received batches.\n\n Calls :py:meth:`cxflow.hooks.AbstractHook.after_batch` events.\n\n :param stream: stream to iterate\n :param train: if set to ``True``, the model will be trained\n :raise ValueError: in case of empty batch when ``on_empty_batch`` is set to ``error``\n :raise ValueError: in case of empty stream when ``on_empty_stream`` is set to ``error``\n :raise ValueError: in case of two batch variables having different lengths\n \"\"\"\n nonempty_batch_count = 0\n for i, batch_input in enumerate(stream):\n self.raise_check_interrupt()\n\n batch_sizes = {len(source) for source in batch_input.values()}\n if len(batch_sizes) == 0 or batch_sizes == {0}:\n if self._on_empty_batch == 'warn':\n logging.warning('%i-th batch in stream `%s` appears to be empty (%i-th empty batch in total). 
Set '\n '`main_loop.on_empty_batch` to `ignore` in order to suppress this warning.',\n i, stream.name, nonempty_batch_count)\n elif self._on_empty_batch == 'error':\n raise ValueError('{}-th batch in stream `{}` appears to be empty ({}-th empty batch in total). Set '\n '`main_loop.on_empty_batch` to `warn` in order to change this error into warning; '\n 'set to `ignore` to remove it.'.format(i, stream.name, nonempty_batch_count))\n continue\n elif self._fixed_batch_size:\n if batch_sizes != {self._fixed_batch_size}:\n var, len_ = [(k, len(v)) for k, v in batch_input.items() if len(v) != self._fixed_batch_size][0]\n logging.debug('%i-th batch in stream `%s` has variable `%s` of length %i inconsistent with '\n '`main_loop.fixed_size` = %i', i, stream.name, var, len_, self._fixed_batch_size)\n continue\n nonempty_batch_count += 1\n\n self._check_sources(batch_input)\n\n with Timer('eval_batch_{}'.format(stream.name), self._epoch_profile):\n batch_output = self._model.run(batch=batch_input, train=train, stream=stream)\n assert set(batch_input.keys()).isdisjoint(set(batch_output)), 'Batch inputs and outputs must not overlap.'\n\n with Timer('after_batch_hooks_{}'.format(stream.name), self._epoch_profile):\n batch_data = {**batch_input, **batch_output}\n for hook in self._hooks:\n hook.after_batch(stream_name=stream.name, batch_data=batch_data)\n if nonempty_batch_count == 0:\n if self._on_empty_stream == 'warn':\n logging.warning('Stream `%s` appears to be empty. Set `main_loop.on_empty_stream` to `ignore` in order '\n 'to suppress this warning.', stream.name)\n elif self._on_empty_stream == 'error':\n raise ValueError('Stream `{}` appears to be empty. 
Set '\n '`main_loop.on_empty_stream` to `warn` in order to change this error into warning; '\n 'set to `ignore` to remove it.'.format(stream.name))"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef train_by_stream(self, stream: StreamWrapper) -> None:\n self._run_epoch(stream=stream, train=True)", "response": "Train the model with the given stream."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef evaluate_stream(self, stream: StreamWrapper) -> None:\n self._run_epoch(stream=stream, train=False)", "response": "Evaluate the given stream."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function to\nget a stream with the given name.", "response": "def get_stream(self, stream_name: str) -> StreamWrapper:\n \"\"\"\n Get a :py:class:`StreamWrapper` with the given name.\n\n :param stream_name: stream name\n :return: dataset function name providing the respective stream\n :raise AttributeError: if the dataset does not provide the function creating the stream\n \"\"\"\n if stream_name not in self._streams:\n stream_fn_name = '{}_stream'.format(stream_name)\n try:\n stream_fn = getattr(self._dataset, stream_fn_name)\n stream_epoch_limit = -1\n if self._fixed_epoch_size is not None and stream_name == self._train_stream_name:\n stream_epoch_limit = self._fixed_epoch_size\n self._streams[stream_name] = StreamWrapper(stream_fn, buffer_size=self._buffer,\n epoch_size=stream_epoch_limit, name=stream_name,\n profile=self._epoch_profile)\n except AttributeError as ex:\n raise AttributeError('The dataset does not have a function for creating a stream named `{}`. 
'\n 'The function has to be named `{}`.'.format(stream_name, stream_fn_name)) from ex\n return self._streams[stream_name]"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nrunning zeroth epoch on the specified streams.", "response": "def _run_zeroth_epoch(self, streams: Iterable[str]) -> None:\n \"\"\"\n Run zeroth epoch on the specified streams.\n\n Calls\n - :py:meth:`cxflow.hooks.AbstractHook.after_epoch`\n\n :param streams: stream names to be evaluated\n \"\"\"\n for stream_name in streams:\n with self.get_stream(stream_name) as stream:\n self.evaluate_stream(stream)\n\n epoch_data = self._create_epoch_data(streams)\n for hook in self._hooks:\n hook.after_epoch(epoch_id=0, epoch_data=epoch_data)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _try_run(self, run_func: Callable[[], None]) -> None:\n # Initialization: before_training\n for hook in self._hooks:\n hook.before_training()\n\n try:\n run_func()\n except TrainingTerminated as ex:\n logging.info('Training terminated: %s', ex)\n\n # After training: after_training\n for hook in self._hooks:\n hook.after_training()", "response": "Try running the given function."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef run_training(self, trace: Optional[TrainingTrace]=None) -> None:\n for stream_name in [self._train_stream_name] + self._extra_streams:\n self.get_stream(stream_name)\n\n def training():\n logging.debug('Training started')\n self._epochs_done = 0\n\n # Zeroth epoch: after_epoch\n if not self._skip_zeroth_epoch:\n logging.info('Evaluating 0th epoch')\n self._run_zeroth_epoch([self._train_stream_name] + self._extra_streams)\n logging.info('0th epoch done\\n\\n')\n\n # Training loop: after_epoch, after_epoch_profile\n while True:\n epoch_id = self._epochs_done + 1\n logging.info('Training epoch %s', epoch_id)\n self._epoch_profile.clear()\n epoch_data = self._create_epoch_data()\n\n 
with self.get_stream(self._train_stream_name) as stream:\n self.train_by_stream(stream)\n\n for stream_name in self._extra_streams:\n with self.get_stream(stream_name) as stream:\n self.evaluate_stream(stream)\n\n with Timer('after_epoch_hooks', self._epoch_profile):\n for hook in self._hooks:\n hook.after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)\n\n for hook in self._hooks:\n hook.after_epoch_profile(epoch_id=epoch_id, profile=self._epoch_profile,\n train_stream_name=self._train_stream_name,\n extra_streams=self._extra_streams)\n self._epochs_done = epoch_id\n if trace is not None:\n trace[TrainingTraceKeys.EPOCHS_DONE] = self._epochs_done\n logging.info('Epoch %s done\\n\\n', epoch_id)\n\n self._try_run(training)", "response": "Runs the main training loop."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nrun the main loop with the given stream in the prediction mode.", "response": "def run_evaluation(self, stream_name: str) -> None:\n \"\"\"\n Run the main loop with the given stream in the prediction mode.\n\n :param stream_name: name of the stream to be evaluated\n \"\"\"\n def prediction():\n logging.info('Running prediction')\n self._run_zeroth_epoch([stream_name])\n logging.info('Prediction done\\n\\n')\n self._try_run(prediction)"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef major_vote(all_votes: Iterable[Iterable[Hashable]]) -> Iterable[Hashable]:\n return [Counter(votes).most_common()[0][0] for votes in zip(*all_votes)]", "response": "Returns an iterable of the most common objects in the given iterable of object iterations."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nloads all the models to be assembled together and save them to the self. 
_models attribute.", "response": "def _load_models(self) -> None:\n \"\"\"Maybe load all the models to be assembled together and save them to the ``self._models`` attribute.\"\"\"\n if self._models is None:\n logging.info('Loading %d models', len(self._model_paths))\n\n def load_model(model_path: str):\n logging.debug('\\tloading %s', model_path)\n if path.isdir(model_path):\n model_path = path.join(model_path, CXF_CONFIG_FILE)\n config = load_config(model_path)\n config['model']['inputs'] = self._inputs\n config['model']['outputs'] = self._outputs\n\n return create_model(config, output_dir=None, dataset=self._dataset,\n restore_from=path.dirname(model_path))\n\n self._models = list(map(load_model, self._model_paths))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nrun the feed - forward pass with the given batch using all the models and aggregate the outputs and return the results.", "response": "def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:\n \"\"\"\n Run feed-forward pass with the given batch using all the models, aggregate and return the results.\n\n .. 
warning::\n :py:class:`Ensemble` can not be trained.\n\n :param batch: batch to be processed\n :param train: ``True`` if this batch should be used for model update, ``False`` otherwise\n :param stream: stream wrapper (useful for precise buffer management)\n :return: aggregated results dict\n :raise ValueError: if the ``train`` flag is set to ``True``\n \"\"\"\n if train:\n raise ValueError('Ensemble model cannot be trained.')\n self._load_models()\n\n # run all the models\n batch_outputs = [model.run(batch, False, stream) for model in self._models]\n\n # aggregate the outputs\n aggregated = {}\n for output_name in self._outputs:\n output_values = [batch_output[output_name] for batch_output in batch_outputs]\n if self._aggregation == 'mean':\n aggregated[output_name] = np.mean(output_values, axis=0)\n elif self._aggregation == 'major_vote':\n output_values_arr = np.array(output_values)\n output = major_vote(output_values_arr.reshape((output_values_arr.shape[0], -1)))\n aggregated[output_name] = np.array(output).reshape(output_values_arr[0].shape)\n\n return aggregated"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef parse_fully_qualified_name(fq_name: str) -> Tuple[Optional[str], str]:\n last_dot = fq_name.rfind('.')\n if last_dot != -1:\n return fq_name[:last_dot], fq_name[last_dot + 1:]\n else:\n return None, fq_name", "response": "Parses a fully - qualified name into a tuple of module and class names."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ngets the specified module attribute.", "response": "def get_attribute(module_name: str, attribute_name: str):\n \"\"\"\n Get the specified module attribute. 
It most cases, it will be a class or function.\n\n :param module_name: module name\n :param attribute_name: attribute name\n :return: module attribute\n \"\"\"\n assert isinstance(module_name, str)\n assert isinstance(attribute_name, str)\n\n _module = importlib.import_module(module_name)\n return getattr(_module, attribute_name)"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef create_object(module_name: str, class_name: str, args: Iterable=(), kwargs: Dict[str, Any]=_EMPTY_DICT):\n return get_attribute(module_name, class_name)(*args, **kwargs)", "response": "Create an object from the given module and class."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef list_submodules(module_name: str) -> List[str]: # pylint: disable=invalid-sequence-index\n _module = importlib.import_module(module_name)\n return [module_name+'.'+submodule_name for _, submodule_name, _ in pkgutil.iter_modules(_module.__path__)]", "response": "List all the submodules in the given module."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nfinds sub - modules of the given module that contain the given class.", "response": "def find_class_module(module_name: str, class_name: str) \\\n -> Tuple[List[str], List[Tuple[str, Exception]]]: # pylint: disable=invalid-sequence-index\n \"\"\"\n Find sub-modules of the given module that contain the given class.\n\n Moreover, return a list of sub-modules that could not be imported as a list of (sub-module name, Exception) tuples.\n\n :param module_name: name of the module to be searched\n :param class_name: searched class name\n :return: a tuple of sub-modules having the searched class and sub-modules that could not be searched\n \"\"\"\n matched_submodules = []\n erroneous_submodules = []\n for submodule_name in list_submodules(module_name):\n try: # the sub-module to be included may be erroneous and we need to 
continue\n submodule = importlib.import_module(submodule_name)\n if hasattr(submodule, class_name):\n matched_submodules.append(submodule_name)\n except Exception as ex: # pylint: disable=broad-except\n erroneous_submodules.append((submodule_name, ex))\n return matched_submodules, erroneous_submodules"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nget a sub - module of the given module which has the given class.", "response": "def get_class_module(module_name: str, class_name: str) -> Optional[str]:\n \"\"\"\n Get a sub-module of the given module which has the given class.\n\n This method wraps `utils.reflection.find_class_module method` with the following behavior:\n\n - raise error when multiple sub-modules with different classes with the same name are found\n - return None when no sub-module is found\n - warn about non-searchable sub-modules\n\n .. note::\n This function logs!\n\n :param module_name: module to be searched\n :param class_name: searched class name\n :return: sub-module with the searched class or None\n \"\"\"\n matched_modules, erroneous_modules = find_class_module(module_name, class_name)\n\n for submodule, error in erroneous_modules:\n logging.warning('Could not inspect sub-module `%s` due to `%s` '\n 'when searching for `%s` in sub-modules of `%s`.',\n submodule, type(error).__name__, class_name, module_name)\n\n if len(matched_modules) == 1:\n return matched_modules[0]\n if len(matched_modules) > 1:\n # check if all the module attributes point to the same class\n first_class = getattr(importlib.import_module(matched_modules[0]), class_name)\n for matched_module in matched_modules:\n another_class = getattr(importlib.import_module(matched_module), class_name)\n if another_class is not first_class:\n raise ValueError('Found more than one sub-module when searching for `{}` in sub-modules of `{}`. '\n 'Please specify the module explicitly. 
Found sub-modules: `{}`'\n .format(class_name, module_name, matched_modules))\n return matched_modules[0]\n return None"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef get_cxflow_arg_parser(add_common_arguments: bool=False) -> ArgumentParser:\n # create parser\n main_parser = ArgumentParser('cxflow',\n description='cxflow: lightweight framework for machine learning with '\n 'focus on modularization, re-usability and rapid experimenting.',\n epilog='For more info see ')\n\n main_parser.add_argument('--version', action='version', help='Print cxflow version and quit.',\n version='cxflow {}'.format(pkg_resources.get_distribution('cxflow').version))\n subparsers = main_parser.add_subparsers(help='cxflow commands')\n\n # create train sub-parser\n train_parser = subparsers.add_parser('train', description='Start cxflow training from the ``config_file``.')\n train_parser.set_defaults(subcommand='train')\n train_parser.add_argument('config_file', help='path to the config file')\n\n # create resume sub-parser\n resume_parser = subparsers.add_parser('resume', description='Resume cxflow training from the ``config_path``.')\n resume_parser.set_defaults(subcommand='resume')\n resume_parser.add_argument('config_path', help='path to the config file or the directory in which it is stored')\n resume_parser.add_argument('restore_from', nargs='?', default=None,\n help='information passed to the model constructor (backend-specific); '\n 'usually a directory in which the trained model is stored')\n\n # create predict sub-parser (deprecated)\n predict_parser = subparsers.add_parser('predict', description='Run prediction with the given ``config_path``.')\n predict_parser.set_defaults(subcommand='predict')\n predict_parser.add_argument('config_path', help='path to the config file or the directory in which it is stored')\n predict_parser.add_argument('restore_from', nargs='?', default=None,\n help='information passed to the model 
constructor (backend-specific); usually a '\n 'directory in which the trained model is stored')\n\n # create eval sub-parser\n eval_parser = subparsers.add_parser('eval', description='Evaluate the given model on the specified data stream.')\n eval_parser.set_defaults(subcommand='eval')\n eval_parser.add_argument('stream_name', help='stream name to be evaluated')\n eval_parser.add_argument('model_path', help='model path to be evaluated')\n eval_parser.add_argument('--config', '-c', nargs='?', default=None, help='optional config path to be used')\n\n # create dataset sub-parser\n dataset_parser = subparsers.add_parser('dataset', description='Invoke arbitrary dataset method.')\n dataset_parser.set_defaults(subcommand='dataset')\n dataset_parser.add_argument('method', help='name of the method to be invoked')\n dataset_parser.add_argument('config_file', help='path to the config file')\n\n # create grid-search sub-parser\n gridsearch_parser = subparsers.add_parser('gridsearch', description='Do parameter grid search (experimental).')\n gridsearch_parser.set_defaults(subcommand='gridsearch')\n gridsearch_parser.add_argument('script', help='Script to be grid-searched')\n gridsearch_parser.add_argument('params', nargs='*', help='Params to be tested. Format: name:type=[value1,value2]. 
'\n 'Type is optional')\n gridsearch_parser.add_argument('--dry-run', action='store_true', help='Only print command output instead '\n 'of executing it right away')\n\n # create ls sub-parser\n ls_parser = subparsers.add_parser('ls', description='List training log dirs in the given path.')\n ls_parser.set_defaults(subcommand='ls')\n ls_parser.add_argument('dir', nargs='?', default=CXF_DEFAULT_LOG_DIR,\n help='path to the log directory to be listed')\n ls_parser.add_argument('-l', '--long', action='store_true', help='use long listing format')\n ls_parser.add_argument('-a', '--all', action='store_true', help='include trainings with no epochs done')\n ls_parser.add_argument('-r', '--recursive', action='store_true',\n help='list all the dirs recursively, stop at training dirs')\n ls_parser.add_argument('-v', '--verbose', action='store_true',\n help='print more verbose output, applicable only when a single train dir is listed')\n\n # create prune sub-parser\n prune_parser = subparsers.add_parser('prune', description='Prune training log dirs in the given path without finished epochs.')\n prune_parser.set_defaults(subcommand='prune')\n prune_parser.add_argument('dir', nargs='?', default=CXF_DEFAULT_LOG_DIR,\n help='path to the log directory to be pruned')\n prune_parser.add_argument('-e', '--epochs', default=1, type=int,\n help='keep only training log dirs having at least this many completed epochs, default 1')\n prune_parser.add_argument('-s', '--subdirs', action='store_true',\n help='delete all subdirectories in training directories')\n\n # add common arguments\n if add_common_arguments:\n for parser in [main_parser, train_parser, resume_parser, predict_parser, dataset_parser, eval_parser]:\n parser.add_argument('--output_root', '-o', default='./log', help='output directory')\n parser.add_argument('--verbose', '-v', action='store_true', help='increase verbosity to level DEBUG')\n\n return main_parser", "response": "Create the cxflow argument parser."} {"SOURCE": 
"codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _enqueue_batches(self, stop_event: Event) -> None:\n while True:\n self._stream = self._get_stream()\n while True:\n # Acquire the semaphore before processing the next batch\n # but immediately release it so that other threads\n # are not blocked when they decide to acquire it again.\n with self._semaphore:\n pass\n # It always takes a short moment before the native call actually\n # releases the GIL and we are free to compute. The following sleep\n # is here to compensate for this short moment - we don't want to \n # slow down the native call before the GIL is released.\n time.sleep(CXF_BUFFER_SLEEP)\n try:\n batch = next(self._stream)\n except StopIteration:\n break\n self._queue.put(batch)\n self._batch_count += 1\n if stop_event.is_set():\n return\n if self._epoch_limit_reached():\n self._queue.put(None)\n self._batch_count = 0\n return\n self._stream = None # yield a new iterator next time\n if self._epoch_size <= 0: # for non-fixed size epochs\n self._queue.put(None)\n self._batch_count = 0\n return", "response": "Enqueue all the stream batches."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _dequeue_batch(self) -> Optional[Batch]:\n if self._enqueueing_thread is None:\n raise ValueError('StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'\n .format(self._name, self._buffer_size))\n if not self._enqueueing_thread.is_alive() and self._queue.empty():\n self._start_thread()\n while True:\n try:\n batch = self._queue.get(timeout=2)\n self._queue.task_done()\n break\n except Empty:\n if not self._enqueueing_thread.is_alive():\n try:\n # the enqueueing thread may just finished properly so lets check the queue eagerly\n batch = self._queue.get_nowait()\n self._queue.task_done()\n break\n except Empty:\n # so we failed to retrieve a batch and the enqueueing 
thread is dead\n # there is no hope, something must went wrong\n raise ChildProcessError('Enqueueing thread ended unexpectedly.')\n return batch", "response": "Returns a single batch from the queue or None if the queue is empty."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _next_batch(self) -> Optional[Batch]:\n if self._epoch_limit_reached():\n self._batch_count = 0\n return None\n try:\n batch = next(self._get_stream())\n self._batch_count += 1\n return batch\n except StopIteration:\n self._stream = None # yield a new iterator next time\n if self._epoch_size > 0: # underlying stream ended but our fixed size epoch did not\n batch = next(self._get_stream()) # get another stream and return its 1st batch\n self._batch_count += 1\n return batch\n else:\n self._batch_count = 0\n return None", "response": "Returns the next batch or None if there are no more batches."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _start_thread(self):\n self._stopping_event = Event()\n self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,))\n self._enqueueing_thread.start()", "response": "Start an enqueueing thread."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _stop_thread(self):\n self._stopping_event.set()\n queue_content = []\n try: # give the enqueueing thread chance to put a batch to the queue and check the stopping event\n while True:\n queue_content.append(self._queue.get_nowait())\n except Empty:\n pass\n self._enqueueing_thread.join()\n try:\n queue_content.append(self._queue.get_nowait()) # collect the very last item\n except Empty:\n pass\n self._queue = Queue(max(len(queue_content), self._buffer_size)) # queue content may be bigger than queue size\n for batch in queue_content:\n self._queue.put(batch)", "response": "Stop the enqueueing thread. 
Keep the queue content and stream state."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nsave the model every n_epochs epoch.", "response": "def _after_n_epoch(self, epoch_id: int, **_) -> None:\n \"\"\"\n Save the model every ``n_epochs`` epoch.\n\n :param epoch_id: number of the processed epoch\n \"\"\"\n SaveEvery.save_model(model=self._model, name_suffix=str(epoch_id), on_failure=self._on_save_failure)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef save_model(model: AbstractModel, name_suffix: str, on_failure: str) -> None:\n try:\n logging.debug('Saving the model')\n save_path = model.save(name_suffix)\n logging.info('Model saved to: %s', save_path)\n except Exception as ex: # pylint: disable=broad-except\n if on_failure == 'error':\n raise IOError('Failed to save the model.') from ex\n elif on_failure == 'warn':\n logging.warning('Failed to save the model.')", "response": "Save the given model with the given name_suffix. On failure take the specified action."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _get_value(self, epoch_data: EpochData) -> float:\n if self._stream_name not in epoch_data:\n raise KeyError('Stream `{}` was not found in the epoch data.\\nAvailable streams are `{}`.'\n .format(self._stream_name, epoch_data.keys()))\n\n stream_data = epoch_data[self._stream_name]\n if self._variable not in stream_data:\n raise KeyError('Variable `{}` for stream `{}` was not found in the epoch data. '\n 'Available variables for stream `{}` are `{}`.'\n .format(self._variable, self._stream_name, self._stream_name, stream_data.keys()))\n\n value = stream_data[self._variable]\n if self._aggregation:\n if not isinstance(value, dict):\n raise TypeError('Variable `{}` is expected to be a dict when aggregation is specified. 
'\n 'Got `{}` instead.'.format(self._variable, type(value).__name__))\n if self._aggregation not in value:\n raise KeyError('Specified aggregation `{}` was not found in the variable `{}`. '\n 'Available aggregations: `{}`.'.format(self._aggregation, self._variable, value.keys()))\n value = value[self._aggregation]\n if not np.isscalar(value):\n raise ValueError('Variable `{}` value is not a scalar.'.format(value))\n\n return value", "response": "Retrieve the value of the monitored variable from the given epoch data."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef _is_value_better(self, new_value: float) -> bool:\n if self._best_value is None:\n return True\n if self._condition == 'min':\n return new_value < self._best_value\n if self._condition == 'max':\n return new_value > self._best_value", "response": "Test if the new value is better than the best so far."} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef after_epoch(self, epoch_data: EpochData, **_) -> None:\n new_value = self._get_value(epoch_data)\n\n if self._is_value_better(new_value):\n self._best_value = new_value\n SaveEvery.save_model(model=self._model, name_suffix=self._OUTPUT_NAME, on_failure=self._on_save_failure)", "response": "Save the model if the new value of the monitored variable is better than the best value so far."} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nprints a progress bar with the given prefix and suffix.", "response": "def print_progress_bar(done: int, total: int, prefix: str = '', suffix: str = '') -> None:\n \"\"\"\n Print a progressbar with the given prefix and suffix, without newline at the end.\n\n param done: current step in computation\n param total: total count of steps in computation\n param prefix: info text displayed before the progress bar\n param suffix: info text displayed after the progress bar\n 
\"\"\"\n\n percent = '{0:.1f}'.format(100 * (done / float(total)))\n base_len = shutil.get_terminal_size().columns - 7 - len(prefix) - len(suffix)\n base_len = min([base_len, 50])\n min_length = base_len - 1 - len('{}/{}={}'.format(total, total, '100.0'))\n length = base_len - len('{}/{}={}'.format(done, total, percent))\n if min_length > 0:\n filled_len = int(min_length * done // total)\n bar = '='*filled_len + '-'*(min_length - filled_len)\n spacing = ' '*(length - min_length)\n print('\\r{}: |{}|{}{}/{}={}% {}'.format(prefix, bar, spacing, done, total, percent, suffix), end='\\r')\n else:\n short_progress = '\\r{}: {}/{}'.format(prefix, done, total)\n if len(short_progress) <= shutil.get_terminal_size().columns:\n print(short_progress, end='\\r')\n else:\n print(['-', '\\\\', '|', '/'][done % 4], end='\\r')"} {"SOURCE": "codesearchnet", "instruction": "Can you implement a function in Python 3 that\nconverts seconds to the time format H M S. UU.", "response": "def get_formatted_time(seconds: float) -> str:\n \"\"\"\n Convert seconds to the time format ``H:M:S.UU``.\n\n :param seconds: time in seconds\n :return: formatted human-readable time\n \"\"\"\n seconds = round(seconds)\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n return '{:d}:{:02d}:{:02d}'.format(h, m, s)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\nprinting the current batch count and progress bar for the current stream.", "response": "def after_batch(self, stream_name: str, batch_data: Batch) -> None:\n \"\"\"\n Display the progress and ETA for the current stream in the epoch.\n If the stream size (total batch count) is unknown (1st epoch), print only the number of processed batches.\n \"\"\"\n if self._current_stream_name is None or self._current_stream_name != stream_name:\n self._current_stream_name = stream_name\n self._current_stream_start = None\n erase_line()\n\n self._current_batch_count[stream_name] += 1\n current_batch = 
self._current_batch_count[stream_name]\n\n # total batch count is available\n if stream_name in self._total_batch_count:\n # compute ETA\n total_batches = self._total_batch_count[stream_name]\n if self._current_stream_start:\n measured_batches = current_batch - 1\n avg_batch_time = (time.time() - self._current_stream_start) / measured_batches\n eta_sec = avg_batch_time * (total_batches - current_batch)\n eta = get_formatted_time(eta_sec)\n else:\n self._current_stream_start = time.time()\n eta = ''\n\n print_progress_bar(current_batch, total_batches, prefix=stream_name, suffix=eta)\n\n # total batch count is not available (1st epoch)\n else:\n short_progress = '{}: {}'.format(stream_name, current_batch)\n if len(short_progress) <= shutil.get_terminal_size().columns:\n print(short_progress, end='\\r')\n else:\n print(['-', '\\\\', '|', '/'][current_batch % 4], end='\\r')"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef after_epoch(self, **_) -> None:\n if not self._total_batch_count_saved:\n self._total_batch_count = self._current_batch_count.copy()\n self._total_batch_count_saved = True\n self._current_batch_count.clear()\n self._current_stream_start = None\n self._current_stream_name = None\n erase_line()", "response": "Reset the internal state of the internal state of the class."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef after_epoch_profile(self, epoch_id, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:\n\n read_data_total = 0\n eval_total = 0\n train_total = sum(profile.get('eval_batch_{}'.format(train_stream_name), []))\n hooks_total = sum(profile.get('after_epoch_hooks', []))\n\n for stream_name in chain(extra_streams, [train_stream_name]):\n read_data_total += sum(profile.get('read_batch_' + stream_name, []))\n hooks_total += sum(profile.get('after_batch_hooks_' + stream_name, []))\n if stream_name != 
train_stream_name:\n eval_total += sum(profile.get('eval_batch_' + stream_name, []))\n\n logging.info('\\tT read data:\\t%f', read_data_total)\n logging.info('\\tT train:\\t%f', train_total)\n logging.info('\\tT eval:\\t%f', eval_total)\n logging.info('\\tT hooks:\\t%f', hooks_total)", "response": "Summarize and log the given epoch profile."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef confusion_matrix(expected: np.ndarray, predicted: np.ndarray, num_classes: int) -> np.ndarray:\n assert np.issubclass_(expected.dtype.type, np.integer), \" Classes' indices must be integers\"\n assert np.issubclass_(predicted.dtype.type, np.integer), \" Classes' indices must be integers\"\n assert expected.shape == predicted.shape, \"Predicted and expected data must be the same length\"\n assert num_classes > np.max([predicted, expected]), \\\n \"Number of classes must be at least the number of indices in predicted/expected data\"\n assert np.min([predicted, expected]) >= 0, \" Classes' indices must be positive integers\"\n cm_abs = np.zeros((num_classes, num_classes), dtype=np.int32)\n for pred, exp in zip(predicted, expected):\n cm_abs[exp, pred] += 1\n return cm_abs", "response": "Calculate and return the confusion matrix for the predicted and expected labels."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]:\n\n param_space = OrderedDict()\n for arg in params:\n assert '=' in arg\n\n name = arg[:arg.index('=')]\n options = arg[arg.index('=') + 1:]\n options = ast.literal_eval(options)\n assert isinstance(options, list), options\n\n param_space[name] = options\n\n param_names = param_space.keys()\n commands = []\n for values in itertools.product(*[param_space[name] for name in param_names]):\n command = str(script).split()\n for name, value in zip(param_names, 
values):\n command.append(str(name) + '=\"' + str(value) + '\"')\n commands.append(command)\n\n return commands", "response": "Builds all grid search commands."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nbuild all grid search parameters and optionally run them.", "response": "def grid_search(script: str, params: typing.Iterable[str], dry_run: bool=False) -> None:\n \"\"\"\n Build all grid search parameter configurations and optionally run them.\n\n :param script: String of command prefix, e.g. ``cxflow train -v -o log``.\n :param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``\n or ``'text_param=[\"hello\", \"cio\"]'``.\n :param dry_run: If set to ``True``, the built commands will only be printed instead of executed.\n \"\"\"\n\n commands = _build_grid_search_commands(script=script, params=params)\n\n if dry_run:\n logging.warning('Dry run')\n for command in commands:\n logging.info(command)\n else:\n for command in commands:\n try:\n completed_process = subprocess.run(command)\n logging.info('Command `%s` completed with exit code %d', command, completed_process.returncode)\n except Exception as _: # pylint: disable=broad-except\n logging.error('Command `%s` failed.', command)"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef stream_info(self) -> None:\n stream_names = [stream_name for stream_name in dir(self)\n if 'stream' in stream_name and stream_name != 'stream_info']\n logging.info('Found %s stream candidates: %s', len(stream_names), stream_names)\n for stream_name in stream_names:\n try:\n stream_fn = getattr(self, stream_name)\n logging.info(stream_name)\n batch = next(iter(stream_fn()))\n rows = []\n for key, value in batch.items():\n try:\n value_arr = np.array(value)\n row = [key, value_arr.dtype, value_arr.shape]\n if value_arr.dtype.kind in 'bui': # boolean, unsigned, integer\n row.append('{} - 
{}'.format(value_arr.min(), value_arr.max()))\n elif value_arr.dtype.kind is 'f':\n row.append('{0:.2f} - {1:.2f}'.format(value_arr.min(), value_arr.max()))\n except ValueError: # np broadcasting failed (ragged array)\n value_arr = None\n row = [key, '{}'.format(type(value[0]).__name__), '({},)'.format(len(list(value)))]\n\n if value_arr is None or \\\n (value_arr.ndim > 0 and value_arr.shape[1:] != np.array(value_arr[0]).shape):\n logging.warning('*** stream source `%s` appears to be ragged (non-rectangular) ***', key)\n\n rows.append(row)\n for line in tabulate.tabulate(rows, headers=['name', 'dtype', 'shape', 'range'],\n tablefmt='grid').split('\\n'):\n logging.info(line)\n except Exception:\n logging.warning('Exception was raised during checking stream `%s`, '\n '(stack trace is displayed only with --verbose flag)', stream_name)\n logging.debug(traceback.format_exc())", "response": "Check and report source names dtypes and shapes of all the streams available."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef parse_arg(arg: str) -> typing.Tuple[str, typing.Any]:\n assert '=' in arg, 'Unrecognized argument `{}`. 
[name]=[value] expected.'.format(arg)\n\n key = arg[:arg.index('=')]\n value = yaml.load(arg[arg.index('=') + 1:])\n\n return key, value", "response": "Parse a string in format key = value to tuple"} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef load_config(config_file: str, additional_args: typing.Iterable[str]=()) -> dict:\n\n config = load_yaml(config_file)\n\n for key_full, value in [parse_arg(arg) for arg in additional_args]:\n key_split = key_full.split('.')\n key_prefix = key_split[:-1]\n key = key_split[-1]\n\n conf = config\n for key_part in key_prefix:\n conf = conf[key_part]\n conf[key] = value\n\n return reload(config)", "response": "Load config from YAML config_file and extend or override it with the given additional_args."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef find_config(config_path: str) -> str:\n if path.isdir(config_path): # dir specified instead of config file\n config_path = path.join(config_path, CXF_CONFIG_FILE)\n assert path.exists(config_path), '`{}` does not exist'.format(config_path)\n return config_path", "response": "Derive configuration file path from the given path and check its existence."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef fallback(message: str, ex: Exception) -> None:\n logging.error('%s', message)\n logging.exception('%s', ex)\n sys.exit(1)", "response": "Fallback procedure when a cli command fails."} {"SOURCE": "codesearchnet", "instruction": "Here you have a function in Python 3, explain what it does\ndef _configure_dataset(self, data_root: str=None, download_urls: Iterable[str]=None, **kwargs) -> None:\n self._data_root = data_root\n self._download_urls = download_urls", "response": "Configure the internal properties of the object."} {"SOURCE": "codesearchnet", "instruction": "Can you generate the 
documentation for the following Python 3 function\ndef resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:\n config = None\n\n try:\n config_path = find_config(config_path)\n restore_from = restore_from or path.dirname(config_path)\n config = load_config(config_file=config_path, additional_args=cl_arguments)\n\n validate_config(config)\n\n logging.debug('\\tLoaded config: %s', config)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Loading config failed', ex)\n\n run(config=config, output_root=output_root, restore_from=restore_from)", "response": "Resume training from the specified directory and start training."} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:\n pass", "response": "This event provides opportunity to process time profile of the finished epoch."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nloads the YAML file.", "response": "def load_yaml(yaml_file: str) -> Any:\n \"\"\"\n Load YAML from file.\n\n :param yaml_file: path to YAML file\n :return: content of the YAML as dict/list\n \"\"\"\n with open(yaml_file, 'r') as file:\n return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script to\nsave the given dict to a file in YAML format.", "response": "def yaml_to_file(data: Mapping, output_dir: str, name: str) -> str:\n \"\"\"\n Save the given object to the given path in YAML.\n\n :param data: dict/list to be dumped\n :param output_dir: target output directory\n :param name: target filename\n :return: target path\n \"\"\"\n dumped_config_f = path.join(output_dir, name)\n with open(dumped_config_f, 'w') as file:\n yaml.dump(data, file, Dumper=ruamel.yaml.RoundTripDumper)\n return 
dumped_config_f"} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef yaml_to_str(data: Mapping) -> str:\n return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper)", "response": "Return the given configuration as YAML str."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef make_simple(data: Any) -> Any:\n return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.Loader)", "response": "Make a new node with simple data."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef reload(data: Any) -> Any:\n return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.RoundTripLoader)", "response": "Dump and load yaml data."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\ncall after the epoch has been completed.", "response": "def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:\n \"\"\"\n Call :py:meth:`_on_plateau_action` if the ``long_term``\n variable mean is lower/greater than the ``short_term`` mean.\n \"\"\"\n\n super().after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)\n\n self._saved_loss.append(epoch_data[self._stream][self._variable][OnPlateau._AGGREGATION])\n\n long_mean = np.mean(self._saved_loss[-self._long_term:])\n short_mean = np.mean(self._saved_loss[-self._short_term:])\n if self._objective == 'min' and long_mean < short_mean:\n self._on_plateau_action(epoch_id=epoch_id, epoch_data=epoch_data)\n elif self._objective == 'max' and long_mean > short_mean:\n self._on_plateau_action(epoch_id=epoch_id, epoch_data=epoch_data)"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nraises an exception when some of the monitored data is NaN.", "response": "def _check_nan(self, epoch_data: EpochData) -> None:\n \"\"\"\n Raise an exception when some 
of the monitored data is NaN.\n\n :param epoch_data: epoch data checked\n :raise KeyError: if the specified variable is not found in the stream\n :raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``\n \"\"\"\n for stream_name in epoch_data.keys():\n stream_data = epoch_data[stream_name]\n variables = self._variables if self._variables is not None else stream_data.keys()\n for variable in variables:\n if variable not in stream_data:\n raise KeyError('Variable `{}` to be nan-checked was not found in the batch data for stream `{}`. '\n 'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))\n\n value = stream_data[variable]\n if self._is_nan(variable, value):\n raise TrainingTerminated('Variable `{}` is NaN.'.format(variable))"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nstopping training once the epoch data contains a monitored variable equal to NaN.", "response": "def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:\n \"\"\"\n If initialized to check after each epoch, stop the training once the epoch data contains a monitored\n variable equal to NaN.\n\n :param epoch_data: epoch data to be checked\n \"\"\"\n\n if self._after_epoch:\n self._check_nan(epoch_data)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\nstopping training once the batch data contains a monitored variable equal to NaN.", "response": "def after_batch(self, stream_name: str, batch_data) -> None:\n \"\"\"\n If initialized to check after each batch, stop the training once the batch data contains a monitored\n variable equal to NaN.\n\n :param stream_name: name of the stream to be checked\n :param batch_data: batch data to be checked\n \"\"\"\n\n if self._after_batch:\n self._check_nan({stream_name: batch_data})"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function for\ncalling _after_n_epochs method every n_epochs epoch.", 
"response": "def after_epoch(self, epoch_id: int, **kwargs) -> None:\n \"\"\"\n Call ``_after_n_epoch`` method every ``n_epochs`` epoch.\n\n :param epoch_id: number of the processed epoch\n \"\"\"\n if epoch_id % self._n_epochs == 0:\n self._after_n_epoch(epoch_id=epoch_id, **kwargs)"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef path_total_size(path_: str) -> int:\n if path.isfile(path_):\n return path.getsize(path_)\n total_size = 0\n for root_dir, _, files in os.walk(path_):\n for file_ in files:\n total_size += path.getsize(path.join(root_dir, file_))\n return total_size", "response": "Compute total size of the given file or directory."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\nreturns human readable size and unit from the given filesize in bytes.", "response": "def humanize_filesize(filesize: int) -> Tuple[str, str]:\n \"\"\"Return human readable pair of size and unit from the given filesize in bytes.\"\"\"\n for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if filesize < 1024.0:\n return '{:3.1f}'.format(filesize), unit+'B'\n filesize /= 1024.0"} {"SOURCE": "codesearchnet", "instruction": "Can you tell what is the following Python 3 function doing\ndef is_train_dir(dir_: str) -> bool:\n return path.exists(path.join(dir_, CXF_CONFIG_FILE)) and \\\n path.exists(path.join(dir_, CXF_TRACE_FILE)) and \\\n path.exists(path.join(dir_, CXF_LOG_FILE))", "response": "Test if the given dir contains training artifacts."} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\nwalks the train dirs and returns a generator of tuples.", "response": "def walk_train_dirs(root_dir: str) -> Iterable[Tuple[str, Iterable[str]]]:\n \"\"\"\n Modify os.walk with the following:\n - return only root_dir and sub-dirs\n - return only training sub-dirs\n - stop recursion at training dirs\n\n :param root_dir: root dir to be walked\n 
:return: generator of (root_dir, training sub-dirs) pairs\n \"\"\"\n if is_train_dir(root_dir):\n yield '', [root_dir]\n return\n for dir_, subdirs, _ in os.walk(root_dir, topdown=True):\n # filter train sub-dirs\n train_subdirs = [subdir for subdir in subdirs if is_train_dir(path.join(dir_, subdir))]\n\n # stop the recursion at the train sub-dirs\n for subdir in train_subdirs:\n subdirs.remove(subdir)\n\n yield dir_, train_subdirs"} {"SOURCE": "codesearchnet", "instruction": "Explain what the following Python 3 code does\ndef _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:\n long_table = []\n for train_dir, config, trace in trainings:\n start_datetime, end_datetime = trace[TrainingTraceKeys.TRAIN_BEGIN], trace[TrainingTraceKeys.TRAIN_END]\n if start_datetime:\n age = format_timedelta(datetime.now() - start_datetime) + ' ago'\n if end_datetime:\n duration = format_timedelta(end_datetime - start_datetime)\n else:\n duration = CXF_NA_STR\n else:\n age = CXF_NA_STR\n duration = CXF_NA_STR\n\n epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] if trace[TrainingTraceKeys.EPOCHS_DONE] else 0\n\n long_table.append([path.basename(train_dir)] +\n list(map(lambda fq_name: fq_name.split('.')[-1], get_classes(config))) +\n [age, duration, epochs_done])\n\n print(tabulate(long_table, tablefmt='plain'))", "response": "Print a plain table with the details of the given trainings."} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nprinting names of the train dirs contained in the given dir.", "response": "def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]]:\n \"\"\"\n Print names of the train dirs contained in the given dir.\n\n :param dir_: dir to be listed\n :param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)\n :param all_: include train dirs with no epochs done (--all option)\n :param long: list more details 
including model name, model and dataset classes,\n age, duration and epochs done (--long option)\n :return: list of found training tuples (train_dir, configuration dict, trace)\n \"\"\"\n all_trainings = []\n for root_dir, train_dirs in walk_train_dirs(dir_):\n if train_dirs:\n if recursive:\n print(root_dir + ':')\n trainings = [(train_dir,\n load_config(path.join(train_dir, CXF_CONFIG_FILE), []),\n TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))\n for train_dir\n in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]\n if not all_:\n trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]\n if long:\n print('total {}'.format(len(trainings)))\n _print_trainings_long(trainings)\n else:\n for train_dir, _, _ in trainings:\n print(path.basename(train_dir))\n all_trainings.extend(trainings)\n if recursive:\n print()\n\n if not recursive:\n break\n return all_trainings"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\nprinting trainings summary. 
In particular print tables summarizing the number of trainings with - particular model names - particular combinations of models and datasets :param all_trainings: a list of training tuples (train_dir, configuration dict, trace)", "response": "def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:\n \"\"\"\n Print trainings summary.\n In particular print tables summarizing the number of trainings with\n - particular model names\n - particular combinations of models and datasets\n\n :param all_trainings: a list of training tuples (train_dir, configuration dict, trace)\n \"\"\"\n counts_by_name = defaultdict(int)\n counts_by_classes = defaultdict(int)\n for _, config, _ in all_trainings:\n counts_by_name[get_model_name(config)] += 1\n counts_by_classes[get_classes(config)] += 1\n\n print_boxed('summary')\n print()\n\n counts_table = [[name, count] for name, count in counts_by_name.items()]\n print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))\n print()\n\n counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]\n print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))\n print()"} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _ls_print_verbose(training: Tuple[str, dict, str]) -> None:\n train_dir, config, _ = training\n print_boxed('config')\n print(yaml_to_str(config))\n print()\n\n print_boxed('artifacts')\n _, dirs, files = next(os.walk(train_dir))\n artifacts = [('d', dir) for dir in dirs] + \\\n [('-', file_) for file_ in files if file_ not in [CXF_CONFIG_FILE, CXF_LOG_FILE, CXF_TRACE_FILE]]\n artifacts = [(type_, name) + humanize_filesize(path_total_size(path.join(train_dir, name)))\n for type_, name in artifacts]\n print(tabulate(artifacts, tablefmt='plain', floatfmt='3.1f'))\n print()", "response": "Print config and artifacts info from the given training tuple."} {"SOURCE": 
"codesearchnet", "instruction": "Can you create a Python 3 function that\nlists training dirs contained in the given dir.", "response": "def list_train_dirs(dir_: str, recursive: bool, all_: bool, long: bool, verbose: bool) -> None:\n \"\"\"\n List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.\n The function is accessible through cxflow CLI `cxflow ls`.\n\n :param dir_: dir to be listed\n :param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)\n :param all_: include train dirs with no epochs done (--all option)\n :param long: list more details including model name, odel and dataset class,\n age, duration and epochs done (--long option)\n :param verbose: print more verbose output with list of additional artifacts and training config,\n applicable only when a single train dir is listed (--verbose option)\n \"\"\"\n if verbose:\n long = True\n\n if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):\n print('The default log directory `{}` does not exist.\\n'\n 'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))\n quit(1)\n\n if not path.exists(dir_):\n print('Specified dir `{}` does not exist'.format(dir_))\n quit(1)\n\n all_trainings = _ls_print_listing(dir_, recursive, all_, long)\n\n if long and len(all_trainings) > 1:\n if not recursive:\n print()\n _ls_print_summary(all_trainings)\n\n if verbose and len(all_trainings) == 1:\n if not recursive:\n print()\n _ls_print_verbose(all_trainings[0])"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\nextends the accumulated variables with the given batch data.", "response": "def after_batch(self, stream_name: str, batch_data: Batch):\n \"\"\"\n Extend the accumulated variables with the given batch data.\n\n :param stream_name: stream name; e.g. 
``train`` or any other...\n :param batch_data: batch data = stream sources + model outputs\n :raise KeyError: if the variables to be aggregated are missing\n :raise TypeError: if the variable value is not iterable (e.g. it is only a scalar)\n \"\"\"\n for variable in self._variables:\n if variable in batch_data:\n value = batch_data[variable]\n if not hasattr(value, '__iter__'):\n raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(variable))\n self._accumulator[stream_name][variable] += list(value)\n else:\n raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '\n 'Available variables are `{}`.'.format(variable, batch_data.keys()))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate the documentation for the following Python 3 function\ndef invoke_dataset_method(config_path: str, method_name: str, output_root: str, cl_arguments: Iterable[str]) -> None:\n\n config = dataset = method = output_dir = None\n\n try:\n config_path = find_config(config_path)\n config = load_config(config_file=config_path, additional_args=cl_arguments)\n assert 'dataset' in config, '`dataset` section not present in the config'\n logging.debug('\\tLoaded config: %s', config)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Loading config failed', ex)\n\n try:\n dataset = create_dataset(config)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Creating dataset failed', ex)\n\n try:\n method = getattr(dataset, method_name)\n except AttributeError as ex:\n fallback('Method `{}` not found in the dataset'.format(method_name), ex)\n\n try:\n method()\n except Exception as ex: # pylint: disable=broad-except\n fallback('Exception occurred during method `{}` invocation'.format(method_name), ex)", "response": "Create the specified dataset and invoke the specified method."} {"SOURCE": "codesearchnet", "instruction": "Make a summary of the following Python 3 code\ndef _signal_handler(self, *_) -> None:\n 
if self._num_signals == 0:\n logging.warning('Interrupt signal caught - training will be terminated')\n logging.warning('Another interrupt signal will terminate the program immediately')\n self._num_signals += 1\n else:\n logging.error('Another interrupt signal caught - terminating program immediately')\n sys.exit(2)", "response": "Handles the interrupt signal."} {"SOURCE": "codesearchnet", "instruction": "How would you explain what the following Python 3 function does\ndef create_output_dir(config: dict, output_root: str, default_model_name: str='Unnamed') -> str:\n logging.info('Creating output dir')\n\n # create output dir\n model_name = default_model_name\n if 'name' not in config['model']:\n logging.warning('\\tmodel.name not found in config, defaulting to: %s', model_name)\n else:\n model_name = config['model']['name']\n\n if not os.path.exists(output_root):\n logging.info('\\tOutput root folder \"%s\" does not exist and will be created', output_root)\n os.makedirs(output_root)\n\n # keep trying to create new output dir until it succeeds\n # this is neccessary due to improbable yet possible output dir name conflicts\n while True:\n try:\n output_dir = path.join(output_root, '{}_{}_{}'.format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),\n model_name, get_random_name()))\n os.mkdir(output_dir)\n break\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise ex\n logging.info('\\tOutput dir: %s', output_dir)\n\n # create file logger\n file_handler = logging.FileHandler(path.join(output_dir, CXF_LOG_FILE))\n file_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))\n logging.getLogger().addHandler(file_handler)\n\n return output_dir", "response": "Create output_dir under the given output_root and create file logger logging to a file under this output_dir"} {"SOURCE": "codesearchnet", "instruction": "Create a Python 3 function for\ncreating a dataset object according to the given config.", "response": "def 
create_dataset(config: dict, output_dir: Optional[str]=None) -> AbstractDataset:\n \"\"\"\n Create a dataset object according to the given config.\n\n Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.\n\n :param config: config dict with dataset config\n :param output_dir: path to the training output dir or None\n :return: dataset object\n \"\"\"\n logging.info('Creating dataset')\n\n dataset_config = make_simple(config)['dataset']\n assert 'class' in dataset_config, '`dataset.class` not present in the config'\n dataset_module, dataset_class = parse_fully_qualified_name(dataset_config['class'])\n\n if 'output_dir' in dataset_config:\n raise ValueError('The `output_dir` key is reserved and can not be used in dataset configuration.')\n\n dataset_config = {'output_dir': output_dir, **config['dataset']}\n del dataset_config['class']\n\n dataset = create_object(dataset_module, dataset_class, args=(yaml_to_str(dataset_config),))\n logging.info('\\t%s created', type(dataset).__name__)\n return dataset"} {"SOURCE": "codesearchnet", "instruction": "Can you write a function in Python 3 where it\ncreates a model object from the checkpoint in output_dir.", "response": "def create_model(config: dict, output_dir: Optional[str], dataset: AbstractDataset,\n restore_from: Optional[str]=None) -> AbstractModel:\n \"\"\"\n Create a model object either from scratch of from the checkpoint in ``resume_dir``.\n\n Cxflow allows the following scenarios\n\n 1. Create model: leave ``restore_from=None`` and specify ``class``;\n 2. 
Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.\n\n :param config: config dict with model config\n :param output_dir: path to the training output dir\n :param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept\n :param restore_from: from whence the model should be restored (backend-specific information)\n :return: model object\n \"\"\"\n\n logging.info('Creating a model')\n\n model_config = config['model']\n\n # workaround for ruamel.yaml expansion bug; see #222\n model_config = dict(model_config.items())\n\n assert 'class' in model_config, '`model.class` not present in the config'\n model_module, model_class = parse_fully_qualified_name(model_config['class'])\n\n # create model kwargs (without `class` and `name`)\n model_kwargs = {'dataset': dataset, 'log_dir': output_dir, 'restore_from': restore_from, **model_config}\n del model_kwargs['class']\n if 'name' in model_kwargs:\n del model_kwargs['name']\n\n try:\n model = create_object(model_module, model_class, kwargs=model_kwargs)\n except (ImportError, AttributeError) as ex:\n if restore_from is None: # training case\n raise ImportError('Cannot create model from the specified model module `{}` and class `{}`.'.format(\n model_module, model_class)) from ex\n\n else: # restore cases (resume, predict)\n logging.warning('Cannot create model from the specified model class `%s`.', model_config['class'])\n assert 'restore_fallback' in model_config, '`model.restore_fallback` not present in the config'\n logging.info('Trying to restore with fallback `%s` instead.', model_config['restore_fallback'])\n\n try: # try fallback class\n fallback_module, fallback_class = parse_fully_qualified_name(model_config['restore_fallback'])\n model = create_object(fallback_module, fallback_class, kwargs=model_kwargs)\n except (ImportError, AttributeError) as ex: # if fallback module/class specified but it fails\n raise 
ImportError('Cannot create model from the specified restore_fallback `{}`.'.format(\n model_config['restore_fallback'],)) from ex\n\n logging.info('\\t%s created', type(model).__name__)\n return model"} {"SOURCE": "codesearchnet", "instruction": "How would you implement a function in Python 3 that\ncreates hooks in the order they appear in the config.", "response": "def create_hooks(config: dict, model: AbstractModel,\n dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]:\n \"\"\"\n Create hooks specified in ``config['hooks']`` list.\n\n Hook config entries may be one of the following types:\n\n .. code-block:: yaml\n :caption: A hook with default args specified only by its name as a string; e.g.\n\n hooks:\n - LogVariables\n - cxflow_tensorflow.WriteTensorBoard\n\n .. code-block:: yaml\n :caption: A hook with custom args as a dict name -> args; e.g.\n\n hooks:\n - StopAfter:\n n_epochs: 10\n\n :param config: config dict\n :param model: model object to be passed to the hooks\n :param dataset: dataset object to be passed to hooks\n :param output_dir: training output dir available to the hooks\n :return: list of hook objects\n \"\"\"\n logging.info('Creating hooks')\n hooks = []\n if 'hooks' in config:\n for hook_config in config['hooks']:\n if isinstance(hook_config, str):\n hook_config = {hook_config: {}}\n assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'\n\n hook_path, hook_params = next(iter(hook_config.items()))\n if hook_params is None:\n logging.warning('\\t\\t Empty config of `%s` hook', hook_path)\n hook_params = {}\n\n # workaround for ruamel.yaml expansion bug; see #222\n hook_params = dict(hook_params.items())\n\n hook_module, hook_class = parse_fully_qualified_name(hook_path)\n # find the hook module if not specified\n if hook_module is None:\n hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)\n logging.debug('\\tFound hook module `%s` for class `%s`', hook_module, 
hook_class)\n if hook_module is None:\n raise ValueError('Can`t find hook module for hook class `{}`. '\n 'Make sure it is defined under `{}` sub-modules.'\n .format(hook_class, CXF_HOOKS_MODULE))\n # create hook kwargs\n hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}\n\n # create new hook\n try:\n hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)\n hooks.append(hook)\n logging.info('\\t%s created', type(hooks[-1]).__name__)\n except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:\n logging.error('\\tFailed to create a hook from config `%s`', hook_config)\n raise ex\n return hooks"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef run(config: dict, output_root: str, restore_from: str=None, eval: Optional[str]=None) -> None:\n\n output_dir = dataset = model = hooks = main_loop = None\n\n try:\n output_dir = create_output_dir(config=config, output_root=output_root)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Failed to create output dir', ex)\n\n try:\n dataset = create_dataset(config=config, output_dir=output_dir)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Creating dataset failed', ex)\n\n try:\n model = create_model(config=config, output_dir=output_dir, dataset=dataset, restore_from=restore_from)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Creating model failed', ex)\n\n try: # save the config to file\n # modify the config so that it contains fallback information\n config['model']['restore_fallback'] = model.restore_fallback\n yaml_to_file(data=config, output_dir=output_dir, name=CXF_CONFIG_FILE)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Saving config failed', ex)\n\n try:\n hooks = create_hooks(config=config, model=model, dataset=dataset, output_dir=output_dir)\n except Exception as ex: # 
pylint: disable=broad-except\n fallback('Creating hooks failed', ex)\n\n try:\n logging.info('Creating main loop')\n kwargs = config['main_loop'] if 'main_loop' in config else {}\n if eval is not None:\n kwargs['extra_streams'] = []\n main_loop = MainLoop(model=model, dataset=dataset, hooks=hooks, **kwargs)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Creating main loop failed', ex)\n\n if eval is not None:\n try:\n with main_loop:\n logging.info('Running the evaluation of stream `%s`', eval)\n main_loop.run_evaluation(eval)\n except Exception as ex: # pylint: disable=broad-except\n fallback('Running the evaluation failed', ex)\n else:\n trace = TrainingTrace(output_dir)\n try:\n with main_loop:\n logging.info('Running the training')\n trace[TrainingTraceKeys.TRAIN_BEGIN] = datetime.now()\n main_loop.run_training(trace)\n trace[TrainingTraceKeys.EXIT_STATUS] = 0\n except Exception as ex: # pylint: disable=broad-except\n trace[TrainingTraceKeys.EXIT_STATUS] = 1\n fallback('Running the training failed', ex)\n except SystemExit as ex:\n trace[TrainingTraceKeys.EXIT_STATUS] = ex.code\n finally:\n trace[TrainingTraceKeys.EPOCHS_DONE] = main_loop.epochs_done\n trace[TrainingTraceKeys.TRAIN_END] = datetime.now()", "response": "Run the CXFlow training."} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef _safe_rmtree(dir_: str):\n try:\n rmtree(dir_)\n except OSError:\n logging.warning('\\t\\t Skipping %s due to OSError', dir_)\n else:\n logging.debug('\\t\\t Deleted %s', dir_)", "response": "Wrap rmtree to inform user about ( un ) success."} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef _prune_subdirs(dir_: str) -> None:\n for logdir in [path.join(dir_, f) for f in listdir(dir_) if is_train_dir(path.join(dir_, f))]:\n for subdir in [path.join(logdir, f) for f in listdir(logdir) if path.isdir(path.join(logdir, f))]:\n 
_safe_rmtree(subdir)", "response": "Delete all subdirs in training log dirs."} {"SOURCE": "codesearchnet", "instruction": "Can you create a Python 3 function that\ndeletes all training dirs with incomplete training artifacts or less than specified epochs done.", "response": "def _prune(dir_: str, epochs: int) -> None:\n \"\"\"\n Delete all training dirs with incomplete training artifacts or with less than specified epochs done.\n\n :param dir_: dir with training log dirs\n :param epochs: minimum number of finished epochs to keep the training logs\n :return: number of log dirs pruned\n \"\"\"\n for logdir in [path.join(dir_, f) for f in listdir(dir_) if path.isdir(path.join(dir_, f))]:\n if not is_train_dir(logdir):\n _safe_rmtree(logdir)\n else:\n trace_path = path.join(logdir, CXF_TRACE_FILE)\n try:\n epochs_done = TrainingTrace.from_file(trace_path)[TrainingTraceKeys.EPOCHS_DONE]\n except (KeyError, TypeError):\n epochs_done = 0\n if not epochs_done or epochs_done < epochs:\n _safe_rmtree(logdir)"} {"SOURCE": "codesearchnet", "instruction": "Implement a Python 3 function for\npruning training log dirs contained in the given dir.", "response": "def prune_train_dirs(dir_: str, epochs: int, subdirs: bool) -> None:\n \"\"\"\n Prune training log dirs contained in the given dir. 
The function is accessible through cxflow CLI `cxflow prune`.\n\n :param dir_: dir to be pruned\n :param epochs: minimum number of finished epochs to keep the training logs\n :param subdirs: delete subdirs in training log dirs\n \"\"\"\n\n if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):\n print('The default log directory `{}` does not exist.\\n'\n 'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))\n quit(1)\n\n if not path.exists(dir_):\n print('Specified dir `{}` does not exist'.format(dir_))\n quit(1)\n\n _prune(dir_, epochs)\n if subdirs:\n _prune_subdirs(dir_)"} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 function that can\nlist of model output names.", "response": "def output_names(self) -> Iterable[str]:\n \"\"\"List of model output names.\"\"\"\n self._load_models()\n return chain.from_iterable(map(lambda m: m.output_names, self._models))"} {"SOURCE": "codesearchnet", "instruction": "Can you generate a brief explanation for the following Python 3 code\ndef run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:\n if train:\n raise ValueError('Ensemble model cannot be trained.')\n self._load_models()\n\n # run all the models in-order\n current_batch = dict(copy.deepcopy(batch))\n for model in self._models:\n current_batch.update(model.run(current_batch, False, None))\n\n return {key: current_batch[key] for key in self.output_names}", "response": "Runs all the models in - order and return accumulated outputs."} {"SOURCE": "codesearchnet", "instruction": "Write a Python 3 script for\nlogging variables from the epoch data.", "response": "def _log_variables(self, epoch_data: EpochData):\n \"\"\"\n Log variables from the epoch data.\n\n .. 
warning::\n At the moment, only scalars and dicts of scalars are properly formatted and logged.\n Other value types are ignored by default.\n\n One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.\n\n :param epoch_data: epoch data to be logged\n :raise KeyError: if the specified variable is not found in the stream\n :raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``\n \"\"\"\n for stream_name in epoch_data.keys():\n stream_data = epoch_data[stream_name]\n variables = self._variables if self._variables is not None else stream_data.keys()\n for variable in variables:\n if variable not in stream_data:\n raise KeyError('Variable `{}` to be logged was not found in the batch data for stream `{}`. '\n 'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))\n value = stream_data[variable]\n if np.isscalar(value):\n logging.info('\\t%s %s: %f', stream_name, variable, value)\n elif isinstance(value, dict):\n keys = list(value.keys())\n if len(keys) == 1:\n logging.info('\\t%s %s %s: %f', stream_name, variable, keys[0], value[keys[0]])\n else:\n logging.info('\\t%s %s:', stream_name, variable)\n for key, val in value.items():\n logging.info('\\t\\t%s: %f', key, val)\n else:\n if self._on_unknown_type == 'error':\n raise TypeError('Variable type `{}` can not be logged. Variable name: `{}`.'\n .format(type(value).__name__, variable))\n elif self._on_unknown_type == 'warn':\n logging.warning('Variable type `%s` can not be logged. 
Variable name: `%s`.',\n type(value).__name__, variable)\n elif self._on_unknown_type == 'str':\n logging.info('\\t%s %s: %s', stream_name, variable, value)"} {"SOURCE": "codesearchnet", "instruction": "Given the following Python 3 function, write the documentation\ndef get_reference_end_from_cigar(reference_start, cigar):\n '''\n This returns the coordinate just past the last aligned base.\n This matches the behavior of pysam's reference_end method\n '''\n reference_end = reference_start\n \n # iterate through cigartuple\n for i in xrange(len(cigar)):\n k, n = cigar[i]\n if k in (0,2,3,7,8): # M, D, N, =, X\n reference_end += n\n return reference_end", "response": "This returns the coordinate just past the last aligned base."}