docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Converts a human-readable timestamp into a Python ``DateTime`` object Args: human_timestamp (str): A timestamp string to_utc (bool): Convert the timestamp to UTC Returns: DateTime: The converted timestamp
def human_timestamp_to_datetime(human_timestamp, to_utc=False):
    """Convert a human-readable timestamp into a ``datetime`` object.

    Args:
        human_timestamp (str): The timestamp string to parse.
        to_utc (bool): If True, convert the parsed timestamp to UTC.

    Returns:
        datetime: The parsed timestamp.
    """
    # dateparser takes an optional settings dict; request UTC conversion
    # only when asked for.
    parse_settings = {"TO_TIMEZONE": "UTC"} if to_utc else {}
    return dateparser.parse(human_timestamp, settings=parse_settings)
364,016
Uses the MaxMind Geolite2 Country database to return the ISO code for the country associated with the given IPv4 or IPv6 address Args: ip_address (str): The IP address to query for parallel (bool): Parallel processing Returns: str: An ISO country code associated with the given IP address
def get_ip_address_country(ip_address, parallel=False):
    """Return the ISO country code for an IPv4 or IPv6 address, using the
    MaxMind GeoLite2 Country database.

    Args:
        ip_address (str): The IP address to query for.
        parallel (bool): Parallel processing; database downloads are skipped
            in this mode to avoid concurrent writes.

    Returns:
        str: An ISO country code, or None when the database is unavailable
        or the address is not found.
    """
    def download_country_database(location="GeoLite2-Country.mmdb"):
        """Download and unpack the GeoLite2 Country database to ``location``."""
        if parallel:
            # Consistency fix: use the module logger (was logging.warning).
            logger.warning("Cannot download GeoIP database in parallel mode")
            return
        url = "https://geolite.maxmind.com/download/geoip/database/" \
              "GeoLite2-Country.tar.gz"
        # Use a browser-like user agent string to bypass some proxy blocks
        headers = {"User-Agent": USER_AGENT}
        original_filename = "GeoLite2-Country.mmdb"
        try:
            response = requests.get(url, headers=headers)
            response.raise_for_status()
            tar_bytes = response.content
            tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode="r:gz")
            # The tarball contains a single dated directory with the .mmdb in it.
            tar_dir = tar_file.getnames()[0]
            tar_path = "{0}/{1}".format(tar_dir, original_filename)
            tar_file.extract(tar_path)
            shutil.move(tar_path, location)
            shutil.rmtree(tar_dir)
        except Exception as e:
            logger.warning(
                "Error downloading {0}: {1}".format(url, e.__str__()))

    # Prefer a system-installed copy of the database when one exists.
    system_paths = [
        "GeoLite2-Country.mmdb",
        "/usr/local/share/GeoIP/GeoLite2-Country.mmdb",
        "/usr/share/GeoIP/GeoLite2-Country.mmdb",
        "/var/lib/GeoIP/GeoLite2-Country.mmdb",
        "/var/local/lib/GeoIP/GeoLite2-Country.mmdb",
        "C:\\GeoIP\\GeoLite2-Country.mmdb"
    ]
    db_path = None
    for system_path in system_paths:
        if os.path.exists(system_path):
            db_path = system_path
            break

    if db_path is None:
        # Fall back to a copy in the temp dir, downloading or refreshing it.
        db_path = os.path.join(tempdir, "GeoLite2-Country.mmdb")
        if not os.path.exists(db_path):
            download_country_database(db_path)
            if not os.path.exists(db_path):
                return None
        else:
            db_age = datetime.now() - datetime.fromtimestamp(
                os.stat(db_path).st_mtime)
            if db_age > timedelta(days=7):
                # Bug fix: refresh the database at db_path; the original
                # called download_country_database() with the default
                # location, so the fresh copy was never used.
                download_country_database(db_path)

    db_reader = geoip2.database.Reader(db_path)
    try:
        return db_reader.country(ip_address).country.iso_code
    except geoip2.errors.AddressNotFoundError:
        return None
    finally:
        # Bug fix: close the reader so the open mmdb file is not leaked.
        db_reader.close()
364,017
Returns reverse DNS and country information for the given IP address Args: ip_address (str): The IP address to check cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds parallel (bool): parallel processing Returns: OrderedDict: ``ip_address``, ``reverse_dns``
def get_ip_address_info(ip_address, cache=None, nameservers=None, timeout=2.0,
                        parallel=False):
    """Return reverse DNS and country information for the given IP address.

    Args:
        ip_address (str): The IP address to check.
        cache (ExpiringDict): Cache storage; consulted first and updated
            with the computed result.
        nameservers (list): One or more nameservers to use (Cloudflare's
            public DNS resolvers by default).
        timeout (float): The DNS timeout in seconds.
        parallel (bool): Parallel processing.

    Returns:
        OrderedDict: ``ip_address``, ``country``, ``reverse_dns``,
        ``base_domain``.
    """
    ip_address = ip_address.lower()
    if cache:
        info = cache.get(ip_address, None)
        if info:
            return info
    info = OrderedDict()
    info["ip_address"] = ip_address
    reverse_dns = get_reverse_dns(ip_address, nameservers=nameservers,
                                  timeout=timeout)
    country = get_ip_address_country(ip_address, parallel=parallel)
    info["country"] = country
    info["reverse_dns"] = reverse_dns
    info["base_domain"] = None
    if reverse_dns is not None:
        info["base_domain"] = get_base_domain(reverse_dns)
    # Bug fix: store the computed result; the original read from the cache
    # but never wrote to it, so the cache was never populated here.
    if cache is not None:
        cache[ip_address] = info
    return info
364,018
Converts a string to a string that is safe for a filename Args: string (str): A string to make safe for a filename Returns: str: A string safe for a filename
def get_filename_safe_string(string):
    """Return a copy of ``string`` that is safe to use as a filename.

    Args:
        string (str): A string to make safe for a filename; ``None`` is
            rendered as the literal string ``"None"``.

    Returns:
        str: A string safe for use as a filename.
    """
    if string is None:
        string = "None"
    # Remove characters that are invalid in filenames on common platforms
    # in a single pass.
    removal_table = {ord(char): None for char in '\\/:"*?|\n\r'}
    cleaned = string.translate(removal_table)
    # Trailing dots are not allowed on Windows.
    return cleaned.rstrip(".")
364,020
Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to standard RFC 822 format Args: msg_bytes (bytes): the content of the .msg file Returns: A RFC 822 string
def convert_outlook_msg(msg_bytes):
    """Use the ``msgconvert`` Perl utility to convert an Outlook MSG file
    to standard RFC 822 format.

    Args:
        msg_bytes (bytes): The content of the .msg file.

    Returns:
        bytes: The RFC 822 content.

    Raises:
        ValueError: When the supplied bytes are not an Outlook MSG file.
        EmailParserError: When the msgconvert utility is not installed.
    """
    if not is_outlook_msg(msg_bytes):
        raise ValueError("The supplied bytes are not an Outlook MSG file")
    tmp_dir = tempfile.mkdtemp()
    msg_path = os.path.join(tmp_dir, "sample.msg")
    with open(msg_path, "wb") as msg_file:
        msg_file.write(msg_bytes)
    try:
        # Improvement: run msgconvert with the temp dir as its working
        # directory instead of os.chdir(), which mutates process-global
        # state and is unsafe with threads or reentrancy.
        subprocess.check_call(["msgconvert", "sample.msg"],
                              cwd=tmp_dir, stdout=null_file, stderr=null_file)
        eml_path = os.path.join(tmp_dir, "sample.eml")
        with open(eml_path, "rb") as eml_file:
            rfc822 = eml_file.read()
    except FileNotFoundError:
        raise EmailParserError(
            "Failed to convert Outlook MSG: msgconvert utility not found")
    finally:
        shutil.rmtree(tmp_dir)
    return rfc822
364,021
A simplified email parser Args: data: The RFC 822 message string, or MSG binary strip_attachment_payloads (bool): Remove attachment payloads Returns (dict): Parsed email data
def parse_email(data, strip_attachment_payloads=False):
    """A simplified email parser.

    Args:
        data: The RFC 822 message string, or MSG binary.
        strip_attachment_payloads (bool): Remove attachment payloads.

    Returns:
        dict: Parsed email data.
    """
    if isinstance(data, bytes):
        # Idiom fix: isinstance instead of type(data) == bytes.
        if is_outlook_msg(data):
            data = convert_outlook_msg(data)
        data = data.decode("utf-8", errors="replace")
    parsed_email = mailparser.parse_from_string(data)
    headers = json.loads(parsed_email.headers_json).copy()
    parsed_email = json.loads(parsed_email.mail_json).copy()
    parsed_email["headers"] = headers

    if "received" in parsed_email:
        for received in parsed_email["received"]:
            if "date_utc" in received:
                if received["date_utc"] is None:
                    del received["date_utc"]
                else:
                    received["date_utc"] = received["date_utc"].replace(
                        "T", " ")

    if "from" not in parsed_email:
        if "From" in parsed_email["headers"]:
            # Bug fix: the headers dict is stored under the lowercase
            # "headers" key; the original read parsed_email["Headers"],
            # which raised a KeyError.
            parsed_email["from"] = parsed_email["headers"]["From"]
        else:
            parsed_email["from"] = None
    if parsed_email["from"] is not None:
        parsed_email["from"] = parse_email_address(parsed_email["from"][0])

    if "date" in parsed_email:
        parsed_email["date"] = parsed_email["date"].replace("T", " ")
    else:
        parsed_email["date"] = None

    # Normalize the address-list fields to parsed address dicts.
    for field in ("reply_to", "to", "cc", "bcc"):
        if field in parsed_email:
            parsed_email[field] = [parse_email_address(address)
                                   for address in parsed_email[field]]
        else:
            parsed_email[field] = []
    if "delivered_to" in parsed_email:
        parsed_email["delivered_to"] = [
            parse_email_address(address)
            for address in parsed_email["delivered_to"]]

    if "attachments" not in parsed_email:
        parsed_email["attachments"] = []
    else:
        for attachment in parsed_email["attachments"]:
            if "payload" in attachment:
                payload = attachment["payload"]
                try:
                    if "content_transfer_encoding" in attachment:
                        if attachment["content_transfer_encoding"] \
                                == "base64":
                            payload = decode_base64(payload)
                        else:
                            payload = str.encode(payload)
                    attachment["sha256"] = hashlib.sha256(payload).hexdigest()
                except Exception as e:
                    logger.debug("Unable to decode attachment: {0}".format(
                        e.__str__()))

    if strip_attachment_payloads:
        for attachment in parsed_email["attachments"]:
            if "payload" in attachment:
                del attachment["payload"]

    if "subject" not in parsed_email:
        parsed_email["subject"] = None
    parsed_email["filename_safe_subject"] = get_filename_safe_string(
        parsed_email["subject"])
    if "body" not in parsed_email:
        parsed_email["body"] = None
    return parsed_email
364,022
Initializes a new instance of :see:LocalizedRef. Arguments: name: The field/column to select from. lang: The language to get the field/column in. If not specified, the currently active language is used.
def __init__(self, name: str, lang: str=None):
    """Initialize a new :see:LocalizedRef.

    Arguments:
        name: The field/column to select from.
        lang: The language to get the field/column in. When not
            specified, the currently active language is used.
    """
    # Fall back to the active language, then the project default.
    if lang:
        language = lang
    else:
        language = translation.get_language() or settings.LANGUAGE_CODE
    super().__init__(name, language)
364,428
Compresses the values from individual fields into a single :see:LocalizedValue instance. Arguments: value: The values from all the widgets. Returns: A :see:LocalizedValue containing all the value in several languages.
def compress(self, value: List[str]) -> value_class:
    """Compress the values from the individual widgets into a single
    :see:LocalizedValue instance.

    Arguments:
        value: The values from all the widgets, in settings.LANGUAGES order.

    Returns:
        A :see:LocalizedValue containing the value in each language.
    """
    localized_value = self.value_class()
    # Fix: the loop variable used to shadow the ``value`` parameter,
    # which is confusing and error-prone.
    for (lang_code, _), widget_value in zip(settings.LANGUAGES, value):
        localized_value.set(lang_code, widget_value)
    return localized_value
364,433
Ran just before the model is saved, allows us to build the slug. Arguments: instance: The model that is being saved. add: Indicates whether this is a new entry to the database or an update.
def pre_save(self, instance, add: bool):
    """Build the localized slugs just before the model is saved.

    Arguments:
        instance: The model that is being saved.
        add: Indicates whether this is a new entry to the database or
            an update.

    Returns:
        The LocalizedValue of slugs that was set on the instance.
    """
    slugs = LocalizedValue()
    for lang_code, value in self._get_populate_values(instance):
        if not value:
            continue
        if self.include_time:
            # Append microseconds to reduce collision probability.
            value += '-%s' % datetime.now().microsecond

        def is_unique(slug: str, language: str) -> bool:
            """Report whether no other row uses this slug in this language."""
            unique_filter = {
                '%s__%s' % (self.name, language): slug
            }
            return not type(instance).objects.filter(**unique_filter).exists()

        unique_slug = self._make_unique_slug(
            slugify(value, allow_unicode=True), lang_code, is_unique)
        slugs.set(lang_code, unique_slug)
    setattr(instance, self.name, slugs)
    return slugs
364,439
Guarantees that the specified slug is unique by appending a number until it is unique. Arguments: slug: The slug to make unique. is_unique: Function that can be called to verify whether the generated slug is unique. Returns: A guaranteed unique slug.
def _make_unique_slug(slug: str, language: str, is_unique: Callable[[str], bool]) -> str: index = 1 unique_slug = slug while not is_unique(unique_slug, language): unique_slug = '%s-%d' % (slug, index) index += 1 return unique_slug
364,440
Gets all values (for each language) from the specified instance's `populate_from` field. Arguments: instance: The instance to get the values from. Returns: A list of (lang_code, value) tuples.
def _get_populate_values(self, instance) -> Tuple[str, str]:
    """Get the value (for each configured language) of the instance's
    ``populate_from`` field.

    Arguments:
        instance: The instance to get the values from.

    Returns:
        A list of (lang_code, value) tuples, one per language in
        settings.LANGUAGES.
        NOTE(review): the return annotation says Tuple[str, str] but a
        list of tuples is returned — consider List[Tuple[str, str]].
    """
    return [
        (
            lang_code,
            self._get_populate_from_value(
                instance, self.populate_from, lang_code
            ),
        )
        for lang_code, _ in settings.LANGUAGES
    ]
364,441
Gets the value to create a slug from in the specified language. Arguments: instance: The model that the field resides on. field_name: The name of the field to generate a slug for. language: The language to generate the slug for. Returns: The text to generate a slug for.
def _get_populate_from_value(instance, field_name: Union[str, Tuple[str]],
                             language: str):
    """Get the text to create a slug from, in the specified language.

    Arguments:
        instance: The model that the field resides on.
        field_name: A field name, a tuple/list of field names, or a
            callable taking the instance.
        language: The language to generate the slug for.

    Returns:
        The text to generate a slug for.
    """
    # A callable produces the value directly.
    if callable(field_name):
        return field_name(instance)

    def get_field_value(name):
        """Resolve ``name`` on the instance, rendered in ``language``."""
        value = resolve_object_property(instance, name)
        with translation.override(language):
            return str(value)

    # Multiple field names are joined with dashes, skipping empty values.
    if isinstance(field_name, (tuple, list)):
        parts = [get_field_value(name) for name in field_name]
        return '-'.join([part for part in parts if part])

    return get_field_value(field_name)
364,442
Ran just before the model is saved, allows us to build the slug. Arguments: instance: The model that is being saved. add: Indicates whether this is a new entry to the database or an update. Returns: The localized slug that was generated.
def pre_save(self, instance, add: bool):
    """Build the localized slugs just before the model is saved.

    Arguments:
        instance: The model being saved; must inherit from
            AtomicSlugRetryMixin.
        add: Indicates whether this is a new entry to the database or
            an update.

    Returns:
        The LocalizedValue of slugs that was set on the instance.

    Raises:
        ImproperlyConfigured: When the model does not inherit from
            AtomicSlugRetryMixin.
    """
    if not isinstance(instance, AtomicSlugRetryMixin):
        raise ImproperlyConfigured((
            'Model \'%s\' does not inherit from AtomicSlugRetryMixin. '
            'Without this, the LocalizedUniqueSlugField will not work.'
        ) % type(instance).__name__)
    slugs = LocalizedValue()
    for lang_code, value in self._get_populate_values(instance):
        if not value:
            continue
        slug = slugify(value, allow_unicode=True)
        # verify whether it's needed to re-generate a slug,
        # if not, re-use the same slug
        if instance.pk is not None:
            current_slug = getattr(instance, self.name).get(lang_code)
            if current_slug is not None:
                # Strip the trailing "-<suffix>" before comparing.
                # NOTE(review): when current_slug contains no '-',
                # rfind returns -1 and this drops the last character —
                # confirm that is intended.
                stripped_slug = current_slug[0:current_slug.rfind('-')]
                if slug == stripped_slug:
                    slugs.set(lang_code, current_slug)
                    continue
        if self.include_time:
            # Append microseconds to reduce collision probability.
            slug += '-%d' % datetime.now().microsecond
        # On a retried save, vary the slug with the retry count.
        retries = getattr(instance, 'retries', 0)
        if retries > 0:
            # do not add another - if we already added time
            if not self.include_time:
                slug += '-'
            slug += '%d' % retries
        slugs.set(lang_code, slug)
    setattr(instance, self.name, slugs)
    return slugs
364,445
Adds this field to the specified model. Arguments: cls: The model to add the field to. name: The name of the field to add.
def contribute_to_class(self, model, name, **kwargs):
    """Add this field to the specified model class.

    Arguments:
        model: The model to add the field to.
        name: The name under which the field is added.
    """
    super(LocalizedField, self).contribute_to_class(model, name, **kwargs)
    # Install the descriptor so attribute access goes through it.
    descriptor = self.descriptor_class(self)
    setattr(model, self.name, descriptor)
364,447
Turns the specified database value into its Python equivalent. Arguments: value: The value that is stored in the database and needs to be converted to its Python equivalent. Returns: A :see:LocalizedValue instance containing the data extracted from the database.
def from_db_value(cls, value, *_) -> Optional[LocalizedValue]:
    """Turn the specified database value into its Python equivalent.

    Arguments:
        value: The value that is stored in the database and needs to be
            converted to its Python equivalent.

    Returns:
        A :see:LocalizedValue instance containing the extracted data, or
        a plain value / list for flattened expression results (see below).
    """
    if not value:
        if getattr(settings, 'LOCALIZED_FIELDS_EXPERIMENTAL', False):
            return None
        else:
            return cls.attr_class()

    # we can get a list if an aggregation expression was used..
    # if the expression was flattened when only one key was selected
    # then we don't wrap each value in a localized value, otherwise we do
    if isinstance(value, list):
        result = []
        for inner_val in value:
            # Bug fix: the original tested ``inner_val is None`` inside
            # the ``isinstance(inner_val, dict)`` branch, where it can
            # never be true (dead code). Check for None first.
            if inner_val is None:
                result.append(None)
            elif isinstance(inner_val, dict):
                result.append(cls.attr_class(inner_val))
            else:
                result.append(inner_val)
        return result

    # this is for when you select an individual key, it will be string,
    # not a dictionary, we'll give it to you as a flat value, not as a
    # localized value instance
    if not isinstance(value, dict):
        return value

    return cls.attr_class(value)
364,448
Turns the specified database value into its Python equivalent. Arguments: value: The value that is stored in the database and needs to be converted to its Python equivalent. Returns: A :see:LocalizedValue instance containing the data extracted from the database.
def to_python(self, value: Union[dict, str, None]) -> LocalizedValue:
    """Turn the specified database value into its Python equivalent.

    Arguments:
        value: The stored value: a dict, a JSON string, or None.

    Returns:
        A :see:LocalizedValue instance containing the extracted data.
    """
    # Let the base class try JSON deserialization first, in case we were
    # handed a JSON string representing a dict.
    try:
        deserialized = super(LocalizedField, self).to_python(value)
    except json.JSONDecodeError:
        # Not valid JSON; treat the raw value as the data itself.
        deserialized = value

    if not deserialized:
        return self.attr_class()
    return self.attr_class(deserialized)
364,449
Cleans the specified value into something we can store in the database. For example, when all the language fields are left empty, and the field is allowed to be null, we will store None instead of empty keys. Arguments: value: The value to clean. Returns: The cleaned value, ready for database storage.
def clean(self, value, *_):
    """Clean the specified value into something we can store.

    When all the language fields are left empty and the field is allowed
    to be null, None is stored instead of empty keys.

    Arguments:
        value: The value to clean.

    Returns:
        The cleaned value, ready for database storage.
    """
    if not value or not isinstance(value, LocalizedValue):
        return None

    # Have all of the language fields been left None/empty?
    is_all_null = all(
        value.get(lang_code) is None
        for lang_code, _ in settings.LANGUAGES
    )

    # All fields empty and null is supported: represent that as null.
    if is_all_null and self.null:
        return None

    return value
364,451
Validates that the value has been filled in for all required languages. Exceptions are raised in order to notify the user of invalid values. Arguments: value: The value to validate.
def validate(self, value: LocalizedValue, *_):
    """Validate that a value is present for every required language.

    Arguments:
        value: The value to validate.

    Raises:
        IntegrityError: When a required language has no value.
    """
    if self.null:
        return

    for lang in self.required:
        # Bug fix: check the language currently being validated; the
        # original read getattr(value, settings.LANGUAGE_CODE), which
        # checked the default language on every iteration while the
        # error message named ``lang``.
        lang_val = getattr(value, lang)

        if lang_val is None:
            raise IntegrityError('null value in column "%s.%s" violates '
                                 'not-null constraint' % (self.name, lang))
364,452
Gets the underlying value in the specified or primary language. Arguments: language: The language to get the value in. Returns: The value in the current language, or the primary language in case no language was specified.
def get(self, language: str=None, default: str=None) -> str:
    """Get the underlying value in the specified or primary language.

    Arguments:
        language: The language to get the value in; falls back to the
            project's primary language when omitted.
        default: Returned when no value is stored for the language.

    Returns:
        The value in the requested language, or ``default``.
    """
    lang = language or settings.LANGUAGE_CODE
    stored = super().get(lang, default)
    # An explicitly stored None still falls back to the default.
    return default if stored is None else stored
364,454
Sets the value in the specified language. Arguments: language: The language to set the value in. value: The value to set.
def set(self, language: str, value: str):
    """Set the value in the specified language.

    Arguments:
        language: The language to set the value in.
        value: The value to set.

    Returns:
        This instance, to allow chaining.
    """
    self[language] = value
    # Mirror dict entries as instance attributes so values are also
    # reachable via attribute access.
    self.__dict__.update(self)
    return self
364,455
Interprets a value passed in the constructor as a :see:LocalizedValue. If string: Assumes it's the default language. If dict: Each key is a language and the value a string in that language. If list: Recurse into to apply rules above. Arguments: value: The value to interpret.
def _interpret_value(self, value):
    """Interpret a value passed in the constructor as a :see:LocalizedValue.

    If str: assumed to be in the default language.
    If dict: each key is a language code, each value the translation.
    If iterable: each element is interpreted recursively.

    Arguments:
        value: The value to interpret.
    """
    # Bug fix: ``collections.Iterable`` was removed in Python 3.10;
    # the ABC lives in collections.abc.
    from collections.abc import Iterable

    # Start with the default value for every configured language.
    for lang_code, _ in settings.LANGUAGES:
        self.set(lang_code, self.default_value)

    if isinstance(value, str):
        self.set(settings.LANGUAGE_CODE, value)
    elif isinstance(value, dict):
        for lang_code, _ in settings.LANGUAGES:
            lang_value = value.get(lang_code, self.default_value)
            self.set(lang_code, lang_value)
    elif isinstance(value, Iterable):
        for val in value:
            self._interpret_value(val)
364,457
Sets the value for a language with the specified name. Arguments: language: The language to set the value in. value: The value to set.
def __setattr__(self, language: str, value: str):
    """Set the value for the language with the specified name.

    Arguments:
        language: The language to set the value in.
        value: The value to set.
    """
    # Delegate to set() so the dict entry and attribute stay in sync.
    self.set(language, value)
364,460
Decompresses the specified value so it can be spread over the internal widgets. Arguments: value: The :see:LocalizedValue to display in this widget. Returns: All values to display in the inner widgets.
def decompress(self, value: LocalizedValue) -> List[str]:
    """Decompress a value so it can be spread over the inner widgets.

    Arguments:
        value: The :see:LocalizedValue to display in this widget.

    Returns:
        All per-language values to display in the inner widgets.
    """
    if not value:
        # No value at all: every inner widget shows nothing.
        return [None for _ in settings.LANGUAGES]
    return [value.get(lang_code) for lang_code, _ in settings.LANGUAGES]
364,480
Ran just before the model is saved, allows us to build the slug. Arguments: instance: The model that is being saved. add: Indicates whether this is a new entry to the database or an update.
def pre_save(self, instance, add: bool):
    """Sanitize every language's value with bleach before saving.

    Arguments:
        instance: The model that is being saved.
        add: Indicates whether this is a new entry to the database or
            an update.

    Returns:
        The sanitized :see:LocalizedValue, or None when the field is
        empty.
    """
    localized_value = getattr(instance, self.attname)
    if not localized_value:
        return None

    for lang_code, _ in settings.LANGUAGES:
        raw_value = localized_value.get(lang_code)
        if not raw_value:
            continue
        cleaned = bleach.clean(raw_value, **get_bleach_default_options())
        localized_value.set(lang_code, cleaned)

    return localized_value
364,484
Load feature image data from image files. Args: images: A list of image filenames. names: An optional list of strings to use as the feature names. Must be in the same order as the images.
def _load_features_from_images(self, images, names=None):
    """Load feature image data from image files.

    Args:
        images: A list of image filenames.
        names: An optional list of strings to use as the feature names.
            Must be in the same order as the images.

    Raises:
        ValueError: When ``names`` is given but its length does not match
            ``images``.
    """
    if names is not None and len(names) != len(images):
        # Improvement: raise the specific ValueError instead of a bare
        # Exception (callers catching Exception still work).
        raise ValueError(
            "Lists of feature names and images must be of same length!")
    self.feature_names = names if names is not None else images
    self.feature_images = imageutils.load_imgs(images, self.masker)
365,354
Decode images using Pearson's r. Computes the correlation between each input image and each feature image across voxels. Args: imgs_to_decode: An ndarray of images to decode, with voxels in rows and images in columns. Returns: An n_features x n_images 2D array, with each cell representing the pearson correlation between the i'th feature and the j'th image across all voxels.
def _pearson_correlation(self, imgs_to_decode): x, y = imgs_to_decode.astype(float), self.feature_images.astype(float) return self._xy_corr(x, y)
365,355
Returns mappable data for a random subset of voxels. May be useful as a baseline in predictive analyses--e.g., to compare performance of a more principled feature selection method with simple random selection. Args: dataset: A Dataset instance n_voxels: An integer specifying the number of random voxels to select. Returns: A 2D numpy array with (randomly-selected) voxels in rows and mappables in columns.
def get_random_voxels(dataset, n_voxels):
    """Return mappable data for a random subset of voxels.

    May be useful as a baseline in predictive analyses — e.g., to compare
    the performance of a principled feature selection method with simple
    random selection.

    Args:
        dataset: A Dataset instance.
        n_voxels: An integer specifying the number of random voxels to
            select.

    Returns:
        A 2D numpy array with (randomly-selected) voxels in rows and
        mappables in columns.
    """
    all_voxels = np.arange(dataset.masker.n_vox_in_vol)
    # Shuffle in place, then take the first n as the random subset.
    np.random.shuffle(all_voxels)
    chosen = all_voxels[:n_voxels]
    return dataset.get_image_data(voxels=chosen)
365,383
Download the latest data files. Args: path (str): Location to save the retrieved data files. Defaults to current directory. unpack (bool): If True, unzips the data file post-download.
def download(path='.', url=None, unpack=False):
    """Download the latest Neurosynth data files.

    Args:
        path (str): Location to save the retrieved data files; a directory
            or a full filename. Defaults to the current directory.
        url (str): Optional download URL; defaults to the neurosynth-data
            repository tarball.
        unpack (bool): If True, unzips the data file post-download.
    """
    if url is None:
        url = 'https://github.com/neurosynth/neurosynth-data/blob/master/current_data.tar.gz?raw=true'

    if os.path.exists(path) and os.path.isdir(path):
        basename = os.path.basename(url).split('?')[0]
        filename = os.path.join(path, basename)
    else:
        filename = path

    u = urlopen(url)
    # Bug fix: the header value is a string; the original indexed [0],
    # which took only its first character (e.g. "1234" -> 1).
    file_size = int(u.headers["Content-Length"])
    print("Downloading the latest Neurosynth files: {0} bytes: {1}".format(
        url, file_size))

    bytes_dl = 0
    block_size = 8192
    # Improvement: a context manager guarantees the file is closed even
    # when the download fails part-way.
    with open(filename, 'wb') as f:
        while True:
            buffer = u.read(block_size)
            if not buffer:
                break
            bytes_dl += len(buffer)
            f.write(buffer)
            # Render an in-place progress indicator on stdout.
            p = float(bytes_dl) / file_size
            status = r"{0} [{1:.2%}]".format(bytes_dl, p)
            status = status + chr(8) * (len(status) + 1)
            sys.stdout.write(status)

    if unpack:
        import tarfile
        tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))
365,390
Load activation data from a text file. Args: filename (str): a string pointing to the location of the txt file to read from.
def _load_activations(self, filename):
    """Load activation data from a tab-delimited text file.

    Args:
        filename (str): a string pointing to the location of the txt
            file to read from. Must contain columns x, y, z, id, and
            space (case-insensitive).

    Returns:
        A pandas DataFrame with lowercased column names, coordinates
        transformed to the target space, and i/j/k indices appended —
        or None when mandatory columns are missing.
    """
    logger.info("Loading activation data from %s..." % filename)
    activations = pd.read_csv(filename, sep='\t')
    # Normalize column names to lowercase for the checks below.
    activations.columns = [col.lower() for col in list(activations.columns)]
    # Make sure all mandatory columns exist
    mc = ['x', 'y', 'z', 'id', 'space']
    if (set(mc) - set(list(activations.columns))):
        # Best-effort: log and return None rather than raising.
        logger.error(
            "At least one of mandatory columns (x, y, z, id, and space) "
            "is missing from input file.")
        return
    # Transform to target space where needed
    spaces = activations['space'].unique()
    xyz = activations[['x', 'y', 'z']].values
    for s in spaces:
        if s != self.transformer.target:
            inds = activations['space'] == s
            xyz[inds] = self.transformer.apply(s, xyz[inds])
    activations[['x', 'y', 'z']] = xyz
    # xyz --> ijk
    # NOTE(review): despite its name, xyz_to_mat appears to produce the
    # matrix (i, j, k) indices here — confirm against transformations.
    ijk = pd.DataFrame(
        transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k'])
    activations = pd.concat([activations, ijk], axis=1)
    return activations
365,393
A convenience wrapper for ImageTable.get_image_data(). Args: ids (list, array): A list or 1D numpy array of study ids to return. If None, returns data for all studies. voxels (list, array): A list or 1D numpy array of voxel indices (i.e., rows) to return. If None, returns data for all voxels.
def get_image_data(self, ids=None, voxels=None, dense=True):
    """A convenience wrapper for ImageTable.get_image_data().

    Args:
        ids (list, array): A list or 1D numpy array of study ids to
            return. If None, returns data for all studies.
        voxels (list, array): A list or 1D numpy array of voxel indices
            (i.e., rows) to return. If None, returns data for all voxels.
        dense (bool): Passed through to the underlying image table.
    """
    table = self.image_table
    return table.get_image_data(ids, voxels=voxels, dense=dense)
365,397
Given a list of features, returns features in order that they appear in database. Args: features (list): A list or 1D numpy array of named features to return. Returns: A list of features in order they appear in database.
def get_ordered_names(self, features):
    """Given a list of features, return them in the order they appear in
    the database.

    Args:
        features (list): A list or 1D numpy array of named features.

    Returns:
        A list of features in the order they appear in the database.
    """
    wanted = np.array(features)
    (positions,) = np.where(np.in1d(self.data.columns.values, wanted))
    return list(self.data.columns[positions].values)
365,408
Returns all features that match any of the elements in the input list. Args: search (str, list): A string or list of strings defining the query. Returns: A list of matching feature names.
def search_features(self, search):
    """Return all features that match any element of the query.

    Args:
        search (str, list): A string or list of strings defining the
            query; '*' acts as a wildcard.

    Returns:
        A list of matching feature names (deduplicated).
    """
    if isinstance(search, string_types):
        search = [search]
    # Translate shell-style '*' wildcards into regex syntax.
    patterns = [s.replace('*', '.*') for s in search]
    feature_names = list(self.data.columns)
    matches = []
    for pattern in patterns:
        # Anchor at the end so the pattern must match the whole name.
        matches.extend(
            name for name in feature_names if re.match(pattern + '$', name))
    return list(set(matches))
365,410
Write out any images generated by the meta-analysis. Args: output_dir (str): folder to write images to prefix (str): all image files will be prepended with this string prefix_sep (str): glue between the prefix and rest of filename image_list (list): optional list of images to save--e.g., ['pFgA_z', 'pAgF']. If image_list is None (default), will save all images.
def save_results(self, output_dir='.', prefix='', prefix_sep='_',
                 image_list=None):
    """Write out any images generated by the meta-analysis.

    Args:
        output_dir (str): folder to write images to.
        prefix (str): all image files will be prepended with this string.
        prefix_sep (str): glue between the prefix and rest of filename.
        image_list (list): optional list of images to save — e.g.,
            ['pFgA_z', 'pAgF']. If None (default), saves all images.
    """
    # An empty prefix needs no separator.
    if prefix == '':
        prefix_sep = ''

    if not exists(output_dir):
        makedirs(output_dir)

    logger.debug("Saving results...")
    if image_list is None:
        image_list = self.images.keys()
    for suffix, img in self.images.items():
        if suffix not in image_list:
            continue
        filename = prefix + prefix_sep + suffix + '.nii.gz'
        imageutils.save_img(img, join(output_dir, filename),
                            self.dataset.masker)
365,421
Initialize a new Masker. Args: volume: A volume indicating the global space within which all subsequent layers must reside. Any voxel in the mask with a non-zero valid is considered valid for analyses. Can be either an image filename or a NiBabel image. layers: Optional masking layers to add; see docstring for add().
def __init__(self, volume, layers=None):
    """Initialize a new Masker.

    Args:
        volume: A volume indicating the global space within which all
            subsequent layers must reside; an image filename or a NiBabel
            image. Any voxel with a non-zero value is considered valid
            for analyses.
        layers: Optional masking layers to add; see docstring for add().
    """
    if isinstance(volume, string_types):
        volume = nb.load(volume)
    self.volume = volume
    data = self.volume.get_data()
    # Cache volume geometry for later coordinate math.
    self.dims = data.shape
    self.vox_dims = self.get_header().get_zooms()
    # Flattened copy of the volume; non-zero entries define the global mask.
    self.full = np.float64(data.ravel())
    self.global_mask = np.where(self.full)
    self.reset()
    if layers is not None:
        self.add(layers)
365,426
Add one or more layers to the stack of masking layers. Args: layers: A string, NiBabel image, list, or dict. If anything other than a dict is passed, assigns sequential layer names based on the current position in stack; if a dict, uses key as the name and value as the mask image.
def add(self, layers, above=None, below=None):
    """Add one or more layers to the stack of masking layers.

    Args:
        layers: A string, NiBabel image, list, or dict. For a dict, the
            keys are layer names and the values mask images; otherwise
            sequential names are assigned from the stack position.
        above: If given, zero out voxel values below this threshold.
        below: If given, zero out voxel values above this threshold.
    """
    def add_named_layer(name, image):
        """Vectorize, threshold, and register a single layer."""
        image = self.get_image(image, output='vector')
        if above is not None:
            image[image < above] = 0.
        if below is not None:
            image[image > below] = 0.
        self.layers[name] = image
        self.stack.append(name)

    if isinstance(layers, dict):
        for name, image in layers.items():
            add_named_layer(name, image)
    else:
        if not isinstance(layers, list):
            layers = [layers]
        for image in layers:
            add_named_layer('layer_%d' % len(self.stack), image)

    # Rebuild the current mask to include the new layers.
    self.set_mask()
365,428
Remove one or more layers from the stack of masking layers. Args: layers: An int, string or list of strings and/or ints. Ints are interpreted as indices in the stack to remove; strings are interpreted as names of layers to remove. Negative ints will also work--i.e., remove(-1) will drop the last layer added.
def remove(self, layers):
    """Remove one or more layers from the stack of masking layers.

    Args:
        layers: An int, string, or list of strings and/or ints. Ints are
            interpreted as indices in the stack to remove (negative ints
            count from the end — i.e., remove(-1) drops the last layer
            added); strings are interpreted as names of layers to remove.

    Raises:
        ValueError: When a named layer does not exist in the stack.
    """
    if not isinstance(layers, list):
        layers = [layers]
    for layer in layers:
        if isinstance(layer, string_types):
            if layer not in self.layers:
                raise ValueError("There's no image/layer named '%s' in "
                                 "the masking stack!" % layer)
            self.stack.remove(layer)
            del self.layers[layer]
        else:
            # Integer index: pop the name from the stack first.
            name = self.stack.pop(layer)
            del self.layers[name]
    # Rebuild the current mask without the removed layers.
    self.set_mask()
365,429
Get a mask by taking the conjunction of all specified layers. Args: layers: Which layers to include. See documentation for add() for format. in_global_mask: Whether or not to restrict the result to the voxels in the global mask (i.e., self.volume).
def get_mask(self, layers=None, output='vector', in_global_mask=True):
    """Return the conjunction mask of the specified layers.

    Args:
        layers: Which layers to include; see add() for accepted formats.
            All current layers are used when None.
        output: Output format passed to get_image(); forced to 'vector'
            when in_global_mask is True.
        in_global_mask: Whether to restrict the result to voxels in the
            global mask (i.e., self.volume).
    """
    # An in-global-mask result only makes sense as a flat vector.
    if in_global_mask:
        output = 'vector'
    if layers is None:
        layers = self.layers.keys()
    elif not isinstance(layers, list):
        layers = [layers]
    # Map stack indices to layer names; names pass through unchanged.
    layers = map(lambda x: x if isinstance(x, string_types)
                 else self.stack[x], layers)
    layers = [self.layers[l] for l in layers if l in self.layers]
    # Always include the original volume
    layers.append(self.full)
    layers = np.vstack(layers).T.astype(bool)
    # Conjunction: a voxel is kept only when every layer keeps it.
    mask = layers.all(axis=1)
    mask = self.get_image(mask, output)
    return mask[self.global_mask] if in_global_mask else mask
365,433
Load multiple images from file into an ndarray. Args: filenames: A single filename or list of filenames pointing to valid images. masker: A Masker instance. nan_to_num: Optional boolean indicating whether to convert NaNs to zero. Returns: An m x n 2D numpy array, where m = number of voxels in mask and n = number of images passed.
def load_imgs(filenames, masker, nan_to_num=True):
    """Load multiple images from file into an ndarray.

    Args:
        filenames: A single filename or list of filenames pointing to
            valid images.
        masker: A Masker instance.
        nan_to_num: Optional boolean indicating whether to convert NaNs
            to zero.

    Returns:
        An m x n 2D numpy array, where m = number of voxels in mask and
        n = number of images passed.
    """
    if isinstance(filenames, string_types):
        filenames = [filenames]
    data = np.zeros((masker.n_vox_in_mask, len(filenames)))
    # Each image becomes one column of masked voxel values.
    for column, filename in enumerate(filenames):
        data[:, column] = masker.mask(filename, nan_to_num)
    return data
365,437
Find padding index. Args: array (list): integer list. Returns: idx: padding index. Examples: >>> array = [1, 2, 0] >>> self.find_pad_index(array) 2
def find_pad_index(self, array):
    """Find the index of the first padding value.

    Args:
        array (list): integer list.

    Returns:
        int: index of the first pad value, or len(array) when no
        padding is present.

    Examples:
        >>> array = [1, 2, 0]
        >>> self.find_pad_index(array)
        2
    """
    sequence = list(array)
    # EAFP: index() raises ValueError when the pad value is absent.
    try:
        return sequence.index(self.pad_value)
    except ValueError:
        return len(sequence)
365,722
Get true length of y. Args: y (list): padded list. Returns: lens: true length of y. Examples: >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]] >>> self.get_length(y) [1, 2, 3]
def get_length(self, y):
    """Get the true (unpadded) length of each row of y.

    Args:
        y (list): padded list.

    Returns:
        list: true lengths.

    Examples:
        >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
        >>> self.get_length(y)
        [1, 2, 3]
    """
    # The first pad index in each row is that row's true length.
    return [self.find_pad_index(sequence) for sequence in y]
365,723
Convert label index to name. Args: y (list): label index list. lens (list): true length of y. Returns: y: label name list. Examples: >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'} >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]] >>> lens = [1, 2, 3] >>> self.convert_idx_to_name(y, lens) [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
def convert_idx_to_name(self, y, lens):
    """Convert label indices to names, truncating padding.

    Args:
        y (list): label index list.
        lens (list): true length of each row of y.

    Returns:
        list: label name list.

    Examples:
        >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
        >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
        >>> lens = [1, 2, 3]
        >>> self.convert_idx_to_name(y, lens)
        [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
    """
    named = []
    for row, true_len in zip(y, lens):
        # Only the unpadded prefix of each row carries real labels.
        named.append([self.id2label[idx] for idx in row[:true_len]])
    return named
365,724
Predict sequences. Args: X (list): input data. y (list): tags. Returns: y_true: true sequences. y_pred: predicted sequences.
def predict(self, X, y):
    """Predict label sequences for a batch.

    Args:
        X (list): input data.
        y (list): one-hot encoded tags.

    Returns:
        tuple: (y_true, y_pred) as label-name sequences.
    """
    raw_pred = self.model.predict_on_batch(X)

    # Collapse the one-hot dimension down to label indices.
    true_idx = np.argmax(y, -1)
    pred_idx = np.argmax(raw_pred, -1)

    # True lengths come from the gold labels; predictions are truncated
    # to the same lengths.
    lens = self.get_length(true_idx)
    y_true = self.convert_idx_to_name(true_idx, lens)
    y_pred = self.convert_idx_to_name(pred_idx, lens)
    return y_true, y_pred
365,725
Calculate f1 score. Args: y_true (list): true sequences. y_pred (list): predicted sequences. Returns: score: f1 score.
def score(self, y_true, y_pred):
    """Calculate and print the F1 score and a classification report.

    Args:
        y_true (list): true sequences.
        y_pred (list): predicted sequences.

    Returns:
        float: the F1 score.
    """
    f1 = f1_score(y_true, y_pred)
    print(' - f1: {:04.2f}'.format(f1 * 100))
    # Also print a per-class breakdown for inspection.
    print(classification_report(y_true, y_pred, digits=4))
    return f1
365,726
Gets entities from sequence. Args: seq (list): sequence of labels. Returns: list: list of (chunk_type, chunk_start, chunk_end). Example: >>> from seqeval.metrics.sequence_labeling import get_entities >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC'] >>> get_entities(seq) [('PER', 0, 1), ('LOC', 3, 3)]
def get_entities(seq, suffix=False):
    """Get entities from a label sequence.

    Args:
        seq (list): sequence of labels; may be a nested list of sequences.
        suffix (bool): if True, tags are suffixes (e.g. 'PER-B') rather
            than prefixes (e.g. 'B-PER').

    Returns:
        list: list of (chunk_type, chunk_start, chunk_end).

    Example:
        >>> from seqeval.metrics.sequence_labeling import get_entities
        >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
        >>> get_entities(seq)
        [('PER', 0, 1), ('LOC', 3, 3)]
    """
    # Flatten a nested list, separating sub-sequences with 'O'.
    if any(isinstance(s, list) for s in seq):
        seq = [item for sublist in seq for item in sublist + ['O']]

    prev_tag, prev_type = 'O', ''
    begin_offset = 0
    chunks = []
    # The sentinel 'O' appended at the end flushes any open chunk.
    for i, chunk in enumerate(seq + ['O']):
        if suffix:
            tag = chunk[-1]
            type_ = chunk.split('-')[0]
        else:
            tag = chunk[0]
            type_ = chunk.split('-')[-1]

        if end_of_chunk(prev_tag, tag, prev_type, type_):
            chunks.append((prev_type, begin_offset, i - 1))
        if start_of_chunk(prev_tag, tag, prev_type, type_):
            begin_offset = i
        prev_tag, prev_type = tag, type_
    return chunks
365,730
Checks if a chunk ended between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_end: boolean.
def end_of_chunk(prev_tag, tag, prev_type, type_):
    """Check whether a chunk ended between the previous and current word.

    Args:
        prev_tag: previous chunk tag.
        tag: current chunk tag.
        prev_type: previous type.
        type_: current type.

    Returns:
        bool: True when a chunk ended.
    """
    # E and S always terminate a chunk.
    if prev_tag in ('E', 'S'):
        return True
    # B/I terminate when the next tag cannot continue the chunk.
    if prev_tag in ('B', 'I') and tag in ('B', 'S', 'O'):
        return True
    # A type change also ends the chunk (unless nothing was open).
    if prev_tag not in ('O', '.') and prev_type != type_:
        return True
    return False
365,731
Checks if a chunk started between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_start: boolean.
def start_of_chunk(prev_tag, tag, prev_type, type_):
    """Check whether a chunk started between the previous and current word.

    Args:
        prev_tag: previous chunk tag.
        tag: current chunk tag.
        prev_type: previous type.
        type_: current type.

    Returns:
        bool: True when a chunk started.
    """
    # B and S always begin a chunk.
    if tag in ('B', 'S'):
        return True
    # E/I begin a chunk when the previous tag could not be continued.
    if prev_tag in ('E', 'S', 'O') and tag in ('E', 'I'):
        return True
    # A type change also begins a chunk (unless the tag is O or '.').
    if tag not in ('O', '.') and prev_type != type_:
        return True
    return False
365,732
Validate item against version schema. Args: item: data object namespace: backend namespace version: schema version context: schema context object
def validate(item, namespace='accounts', version=2, context=None):
    """Validate ``item`` against the schema for the given namespace/version.

    Args:
        item: data object to validate.
        namespace: backend namespace (only 'accounts' is supported).
        version: schema version (1 or 2).
        context: schema context object (v2 only).

    Raises:
        InvalidSWAGDataException: on an unsupported namespace or version.
    """
    if namespace != 'accounts':
        raise InvalidSWAGDataException('Namespace not supported. Namespace: {}'.format(namespace))
    if version == 2:
        return v2.AccountSchema(strict=True, context=context).load(item).data
    if version == 1:
        return v1.AccountSchema(strict=True).load(item).data
    raise InvalidSWAGDataException('Schema version is not supported. Version: {}'.format(version))
365,928
Returns a list of all TopicPartitions for a given topic. Arguments: consumer: an initialized KafkaConsumer topic: a topic name to fetch TopicPartitions for :returns: list(TopicPartition): A list of TopicPartitions that belong to the given topic
def consumer_partitions_for_topic(consumer, topic):
    """Return a TopicPartition for every partition of ``topic``.

    Args:
        consumer: an initialized KafkaConsumer.
        topic: topic name to look up.

    Returns:
        list of TopicPartition; empty (with an error logged) when the
        topic has no partition metadata.
    """
    partitions = consumer.partitions_for_topic(topic)
    if partitions is None:
        # None (rather than an empty set) means the topic is unknown.
        logging.error(
            "No partitions found for topic {}. Maybe it doesn't exist?".format(topic),
        )
        return []
    return [TopicPartition(topic, partition) for partition in partitions]
366,299
Encode an OffsetCommitRequest struct Arguments: group: string, the consumer group you are committing offsets for payloads: list of OffsetCommitRequestPayload
def encode_offset_commit_request_kafka(cls, group, payloads):
    """Encode an OffsetCommitRequest (protocol version 2).

    Args:
        group: string, the consumer group the offsets are committed for.
        payloads: list of OffsetCommitRequestPayload.
    """
    request_class = kafka.protocol.commit.OffsetCommitRequest[2]
    # Regroup the flat payload list into per-topic partition tuples.
    topics = [
        (topic,
         [(partition, payload.offset, payload.metadata)
          for partition, payload in six.iteritems(topic_payloads)])
        for topic, topic_payloads
        in six.iteritems(group_by_topic_and_partition(payloads))
    ]
    return request_class(
        consumer_group=group,
        consumer_group_generation_id=request_class.DEFAULT_GENERATION_ID,
        consumer_id='',
        retention_time=request_class.DEFAULT_RETENTION_TIME,
        topics=topics)
366,351
Decode GroupCoordinatorResponse. Note that ConsumerMetadataResponse is renamed to GroupCoordinatorResponse in 0.9+ Arguments: response: response to decode
def decode_consumer_metadata_response(cls, response):
    """Decode a GroupCoordinatorResponse into a ConsumerMetadataResponse.

    (ConsumerMetadataResponse was renamed GroupCoordinatorResponse in
    Kafka 0.9+; this adapts the new shape to the legacy tuple.)

    Args:
        response: response object to decode.
    """
    error_code = response.error_code
    coordinator_id = response.coordinator_id
    host = response.host
    port = response.port
    return ConsumerMetadataResponse(error_code, coordinator_id, host, port)
366,352
Merges in an existing schema. arguments: * `schema` (required - `dict` or `SchemaNode`): an existing JSON Schema to merge.
def add_schema(self, schema):
    """Merge an existing JSON schema into this generator.

    Args:
        schema: a `dict` or `SchemaNode` JSON Schema to merge.

    Returns:
        self, to allow method chaining.
    """
    # SchemaNode instances are serialized to plain dicts before parsing.
    if isinstance(schema, SchemaNode):
        schema = schema.to_schema()

    for subschema in self._get_subschemas(schema):
        # Each subschema is handled by its matching SchemaType generator.
        self._get_generator_for_schema(subschema).add_schema(subschema)

    return self
366,775
Modify the schema to accommodate an object. arguments: * `obj` (required - `dict`): a JSON object to use in generating the schema.
def add_object(self, obj):
    """Update the schema to accommodate a JSON object.

    Args:
        obj: a `dict` JSON object used to extend the schema.

    Returns:
        self, to allow method chaining.
    """
    # Delegate to the SchemaType generator that matches this object.
    generator = self._get_generator_for_object(obj)
    generator.add_object(obj)
    return self
366,776
Modifies a connection. Args: id: Id of the connection. body (dict): Specifies which fields are to be modified, and to what values. See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id Returns: The modified connection object.
def update(self, id, body):
    """Modify the connection ``id`` with the field values in ``body``.

    Returns the modified connection object.
    """
    url = self._url(id)
    return self.client.patch(url, data=body)
367,040
Creates a new connection. Args: body (dict): Attributes used to create the connection. Mandatory attributes are: 'name' and 'strategy'. See: https://auth0.com/docs/api/management/v2#!/Connections/post_connections
def create(self, body):
    """Create a new connection; ``body`` must include 'name' and 'strategy'."""
    url = self._url()
    return self.client.post(url, data=body)
367,041
Deletes a specified connection user by its email. Args: id (str): The id of the connection (must be a database connection). email (str): The email of the user to delete. See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email Returns: An empty dict.
def delete_user_by_email(self, id, email):
    """Delete the user with ``email`` from database connection ``id``.

    Returns an empty dict.
    """
    url = self._url(id) + '/users'
    return self.client.delete(url, params={'email': email})
367,042
Deletes a custom domain. Args: id (str): The id of the custom domain to delete See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/delete_custom_domains_by_id
def delete(self, id):
    """Delete the custom domain identified by ``id``."""
    return self.client.delete(self._url('%s' % (id)))
367,045
Configure a new custom domain Args: body (str): The domain, type and verification method in json See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/post_custom_domains
def create_new(self, body):
    """Configure a new custom domain from the attributes in ``body``."""
    url = self._url()
    return self.client.post(url, data=body)
367,046
Verify a custom domain Args: id (str): The id of the custom domain to verify See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/post_verify
def verify(self, id):
    """Start verification of the custom domain identified by ``id``."""
    return self.client.post(self._url('%s/verify' % (id)))
367,047
Get SAML2.0 Metadata. Args: client_id (str): Client Id of the application to get the SAML metadata for.
def saml_metadata(self, client_id):
    """Fetch the SAML 2.0 metadata for the application ``client_id``."""
    url = 'https://{}/samlp/metadata/{}'.format(self.domain, client_id)
    return self.get(url=url)
367,048
Logout Use this endpoint to log out a user. If you want to navigate the user to a specific URL after the logout, set that URL in the returnTo parameter. The URL should be included in the appropriate Allowed Logout URLs list: Args: client_id (str): The client_id of your application. returnTo (str): URL to redirect the user to after the logout. federated (bool): Querystring parameter to log the user out of the IdP
def logout(self, client_id, return_to, federated=False):
    """Log the user out, then redirect to ``return_to``.

    Args:
        client_id (str): the client_id of your application.
        return_to (str): URL to redirect to after logout; must be listed
            in the appropriate Allowed Logout URLs.
        federated (bool): also log the user out of the IdP.
    """
    return_to = quote_plus(return_to)
    if federated is True:
        # The bare 'federated' querystring flag propagates logout to the IdP.
        url = 'https://{}/v2/logout?federated&client_id={}&returnTo={}'.format(
            self.domain, client_id, return_to)
    else:
        url = 'https://{}/v2/logout?client_id={}&returnTo={}'.format(
            self.domain, client_id, return_to)
    return self.get(url, headers={'Content-Type': 'application/json'})
367,051
Rotate a client secret. The generated secret is NOT base64 encoded. Args: id (str): Client ID of the application. See: https://auth0.com/docs/api/management/v2#!/Clients/post_rotate_secret
def rotate_secret(self, id):
    """Rotate a client secret. The generated secret is NOT base64 encoded.

    Args:
        id (str): Client ID of the application.

    See: https://auth0.com/docs/api/management/v2#!/Clients/post_rotate_secret
    """
    # Fix: the endpoint is POST /clients/{id}/rotate-secret; the previous
    # code issued a GET with a redundant 'id' query parameter, which the
    # Management API rejects.
    url = self._url('%s/rotate-secret' % id)
    return self.client.post(url)
367,053
Retrieves all resource servers Args: page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers
def get_all(self, page=None, per_page=None, include_totals=False):
    """List resource servers with optional zero-based pagination.

    Args:
        page (int, optional): result page number (zero based).
        per_page (int, optional): entries per page.
        include_totals (bool, optional): include the query summary.
    """
    return self.client.get(
        self._url(),
        params={'page': page,
                'per_page': per_page,
                'include_totals': str(include_totals).lower()})
367,059
Gets blocks by identifier Args: identifier (str): Should be any of: username, phone_number, email. See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks
def get_by_identifier(self, identifier):
    """Fetch blocks for ``identifier`` (username, phone_number or email)."""
    return self.client.get(self._url(), params={'identifier': identifier})
367,060
Unblocks by identifier Args: identifier (str): Should be any of: username, phone_number, email. See: https://auth0.com/docs/api/management/v2#!/User_Blocks/delete_user_blocks
def unblock_by_identifier(self, identifier):
    """Remove blocks for ``identifier`` (username, phone_number or email)."""
    return self.client.delete(self._url(), params={'identifier': identifier})
367,061
Update Guardian factor Useful to enable / disable factor Args: name (str): Either push-notification or sms body (dict): Attributes to modify. See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
def update_factor(self, name, body):
    """Modify (e.g. enable/disable) the Guardian factor ``name``.

    Args:
        name (str): either push-notification or sms.
        body (dict): attributes to modify.
    """
    return self.client.put(self._url('factors/{}'.format(name)), data=body)
367,063
Update enrollment and verification SMS templates. Useful to send custom messages on sms enrollment and verification Args: body (dict): Attributes to modify. See: https://auth0.com/docs/api/management/v2#!/Guardian/put_templates
def update_templates(self, body):
    """Update the SMS enrollment and verification message templates."""
    url = self._url('factors/sms/templates')
    return self.client.put(url, data=body)
367,064
Retrieves an enrollment. Useful to check its type and related metadata. Args: id (str): The id of the device account to update See: https://auth0.com/docs/api/management/v2#!/Guardian/get_enrollments_by_id
def get_enrollment(self, id):
    """Fetch the Guardian enrollment ``id`` (type and related metadata)."""
    return self.client.get(self._url('enrollments/{}'.format(id)))
367,065
Deletes an enrollment. Useful when you want to force re-enroll. Args: id (str): The id of the device account to update See: https://auth0.com/docs/api/management/v2#!/Guardian/delete_enrollments_by_id
def delete_enrollment(self, id):
    """Delete the Guardian enrollment ``id``, forcing re-enrollment."""
    return self.client.delete(self._url('enrollments/{}'.format(id)))
367,066
Creates an enrollment ticket for user_id A useful way to send an email to a user, with a link that leads them to start the enrollment process Args: body (dict): Details of the user to send the ticket to. See: https://auth0.com/docs/api/management/v2#!/Guardian/post_ticket
def create_enrollment_ticket(self, body):
    """Create an enrollment ticket (emailed link) for the user in ``body``."""
    url = self._url('enrollments/ticket')
    return self.client.post(url, data=body)
367,067
Get Guardian SNS or SMS factor providers. Returns provider configuration Args: factor_name (str): Either push-notification or sms name (str): Name of the provider See: https://auth0.com/docs/api/management/v2#!/Guardian/get_sns https://auth0.com/docs/api/management/v2#!/Guardian/get_twilio
def get_factor_providers(self, factor_name, name):
    """Fetch the provider configuration for a Guardian factor.

    Args:
        factor_name (str): either push-notification or sms.
        name (str): name of the provider.
    """
    path = 'factors/{}/providers/{}'.format(factor_name, name)
    return self.client.get(self._url(path))
367,068
Update Guardian factor providers. Updates the provider configuration Args: factor_name (str): Either push-notification or sms name (str): Name of the provider body (dict): Provider configuration attributes to update. See: https://auth0.com/docs/api/management/v2#!/Guardian/put_twilio
def update_factor_providers(self, factor_name, name, body):
    """Update the provider configuration for a Guardian factor.

    Args:
        factor_name (str): either push-notification or sms.
        name (str): name of the provider.
        body (dict): provider configuration to store.
    """
    path = 'factors/{}/providers/{}'.format(factor_name, name)
    return self.client.put(self._url(path), data=body)
367,069
Removes the rules config for a given key. Args: key (str): rules config key to remove See: https://auth0.com/docs/api/management/v2#!/Rules_Configs/delete_rules_configs_by_key
def unset(self, key):
    """Remove the rules config value stored under ``key``."""
    return self.client.delete(self._url(), params={'key': key})
367,081
Sets the rules config for a given key. Args: key (str): rules config key to set value (str): value to set for the rules config key See: https://auth0.com/docs/api/management/v2#!/Rules_Configs/put_rules_configs_by_key
def set(self, key, value):
    """Store ``value`` as the rules config for ``key``."""
    return self.client.put(self._url('{}'.format(key)), data={'value': value})
367,082
Gets the daily stats for a particular period. Args: from_date (str): The first day of the period (inclusive) in YYYYMMDD format. to_date (str): The last day of the period (inclusive) in YYYYMMDD format. See: https://auth0.com/docs/api/management/v2#!/Stats/get_daily
def daily_stats(self, from_date=None, to_date=None):
    """Fetch daily stats for the inclusive YYYYMMDD range [from_date, to_date]."""
    params = {'from': from_date, 'to': to_date}
    return self.client.get(self._url('daily'), params=params)
367,083
Create an email verification ticket. Args: body (dict): Please see: https://auth0.com/docs/api/v2#!/Tickets/post_email_verification
def create_email_verification(self, body):
    """Create an email-verification ticket."""
    url = self._url('email-verification')
    return self.client.post(url, data=body)
367,084
Create password change ticket. Args: body (dict): Please see: https://auth0.com/docs/api/v2#!/Tickets/post_password_change
def create_pswd_change(self, body):
    """Create a password-change ticket."""
    url = self._url('password-change')
    return self.client.post(url, data=body)
367,085
Retrieves the jti and aud of all tokens in the blacklist. Args: aud (str, optional): The JWT's aud claim. The client_id of the application for which it was issued. See: https://auth0.com/docs/api/management/v2#!/Blacklists/get_tokens
def get(self, aud=None):
    """List jti/aud of blacklisted tokens, optionally filtered by audience."""
    return self.client.get(self.url, params={'aud': aud})
367,090
Adds a token to the blacklist. Args: jti (str): the jti of the JWT to blacklist. aud (str, optional): The JWT's aud claim. The client_id of the application for which it was issued. body (dict): See: https://auth0.com/docs/api/management/v2#!/Blacklists/post_tokens
def create(self, jti, aud=''):
    """Blacklist the token ``jti`` (optionally scoped to audience ``aud``)."""
    body = {'jti': jti, 'aud': aud}
    return self.client.post(self.url, data=body)
367,091
Delete a user's multifactor provider. Args: id (str): The user's id. provider (str): The multifactor provider. Supported values 'duo' or 'google-authenticator' See: https://auth0.com/docs/api/management/v2#!/Users/delete_multifactor_by_provider
def delete_multifactor(self, id, provider):
    """Remove a user's multifactor provider ('duo' or 'google-authenticator')."""
    path = '{}/multifactor/{}'.format(id, provider)
    return self.client.delete(self._url(path))
367,093
Unlink a user account Args: id (str): The user_id of the user identity. provider (str): The type of identity provider (e.g: facebook). user_id (str): The unique identifier for the user for the identity. See: https://auth0.com/docs/api/management/v2#!/Users/delete_user_identity_by_user_id
def unlink_user_account(self, id, provider, user_id):
    """Unlink the identity ``provider``/``user_id`` from the user ``id``."""
    path = '{}/identities/{}/{}'.format(id, provider, user_id)
    return self.client.delete(self._url(path))
367,094
Link user accounts. Links the account specified in the body (secondary account) to the account specified by the user_id param of the URL (primary account). Args: user_id (str): The user_id of the primary identity where you are linking the secondary account to. body (dict): Please see: https://auth0.com/docs/api/v2#!/Users/post_identities
def link_user_account(self, user_id, body):
    """Link the secondary account described in ``body`` to primary ``user_id``."""
    url = self._url('{}/identities'.format(user_id))
    return self.client.post(url, data=body)
367,095
Removes the current recovery token, generates and returns a new one Args: user_id (str): The user_id of the user identity. See: https://auth0.com/docs/api/management/v2#!/Users/post_recovery_code_regeneration
def regenerate_recovery_code(self, user_id):
    """Invalidate the current recovery code and return a freshly generated one."""
    return self.client.post(self._url('{}/recovery-code-regeneration'.format(user_id)))
367,096
Retrieves all Guardian enrollments. Args: user_id (str): The user_id of the user to retrieve See: https://auth0.com/docs/api/management/v2#!/Users/get_enrollments
def get_guardian_enrollments(self, user_id):
    """List all Guardian enrollments for ``user_id``."""
    return self.client.get(self._url('{}/enrollments'.format(user_id)))
367,097
Get failed job error details Args: id (str): The id of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_errors
def get_failed_job(self, id):
    """Fetch error details for the failed job ``id``."""
    return self.client.get(self._url('{}/errors'.format(id)))
367,101
Get results of a job Args: job_id (str): The ID of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_results
def get_results(self, job_id):
    """Fetch the results of job ``job_id``."""
    return self.client.get(self._url('%s/results' % job_id))
367,102
Export all users to a file using a long running job. Check job status with get(). URL pointing to the export file will be included in the status once the job is complete. Args: body (dict): Please see: https://auth0.com/docs/api/management/v2#!/Jobs/post_users_exports
def export_users(self, body):
    """Start a long-running job exporting all users to a file.

    Check progress with get(); the status includes the export file URL
    once the job completes.
    """
    url = self._url('users-exports')
    return self.client.post(url, data=body)
367,103
Imports users to a connection from a file. Args: connection_id (str): The connection id of the connection to which users will be inserted. file_obj (file): A file-like object to upload. The format for this file is explained in: https://auth0.com/docs/bulk-import See: https://auth0.com/docs/api/management/v2#!/Jobs/post_users_imports
def import_users(self, connection_id, file_obj, upsert=False):
    """Bulk-import users into connection ``connection_id`` from ``file_obj``.

    Args:
        connection_id (str): target connection id.
        file_obj (file): file-like object to upload (bulk-import format).
        upsert (bool): update existing users instead of failing.
    """
    data = {'connection_id': connection_id, 'upsert': str(upsert).lower()}
    return self.client.file_post(self._url('users-imports'),
                                 data=data,
                                 files={'users': file_obj})
367,104
Send verification email. Send an email to the specified user that asks them to click a link to verify their email address. Args: body (dict): Please see: https://auth0.com/docs/api/v2#!/Jobs/post_verification_email
def send_verification_email(self, body):
    """Email the user a link asking them to verify their email address."""
    url = self._url('verification-email')
    return self.client.post(url, data=body)
367,105
Configure the email provider. Args: body (dict): Please see: https://auth0.com/docs/api/v2#!/Emails/post_provider
def config(self, body):
    """Configure the email provider with the attributes in ``body``."""
    url = self._url()
    return self.client.post(url, data=body)
367,109
Returns the user information based on the Auth0 access token. This endpoint will work only if openid was granted as a scope for the access_token. Args: access_token (str): Auth0 access token (obtained during login). Returns: The user profile.
def userinfo(self, access_token):
    """Return the user profile for an Auth0 access token.

    Works only when 'openid' was granted as a scope for the token.
    """
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    return self.get(url='https://{}/userinfo'.format(self.domain),
                    headers=headers)
367,110
Returns user profile based on the user's jwt Validates a JSON Web Token (signature and expiration) and returns the user information associated with the user id (sub property) of the token. Args: jwt (str): User's jwt Returns: The user profile.
def tokeninfo(self, jwt):
    """Validate a JWT and return the profile of the user it identifies.

    Deprecated upstream; a DeprecationWarning is emitted on every call.
    """
    warnings.warn("/tokeninfo will be deprecated in future releases",
                  DeprecationWarning)
    url = 'https://{}/tokeninfo'.format(self.domain)
    return self.post(url=url,
                     data={'id_token': jwt},
                     headers={'Content-Type': 'application/json'})
367,111
Base function for one time http requests. Args: method (str): The http method to use. For example 'GET' uri (str): The url of the resource. Example: 'https://example.com/stuff' kwargs: Any number of arguments supported, found here: http://asks.rtfd.io/en/latest/overview-of-funcs-and-args.html Returns: Response (asks.Response): The Response object.
async def request(method, uri, **kwargs):
    """Perform a one-shot HTTP request using a throwaway Session.

    Args:
        method (str): HTTP method, e.g. 'GET'.
        uri (str): URL of the resource.
        kwargs: forwarded to Session.request.

    Returns:
        The Response object.
    """
    # Session-level options are popped so they aren't forwarded per-request.
    cookie_jar = kwargs.pop('persist_cookies', None)
    ssl_context = kwargs.pop('ssl_context', None)
    async with Session(persist_cookies=cookie_jar,
                       ssl_context=ssl_context) as session:
        return await session.request(method, url=uri, **kwargs)
367,714
Forms multipart requests from a dict with name, path k/vs. Name does not have to be the actual file name. Args: files_dict (dict): A dict of `filename:filepath`s, to be sent as multipart files. Returns: multip_pkg (str): The string representation of the content body, multipart formatted.
async def _multipart(self, files_dict):
    """Build a multipart/form-data body from a name -> filepath/value dict.

    Args:
        files_dict (dict): maps field names to file paths (sent as file
            parts) or plain string values (sent as form fields).

    Returns:
        bytes: the complete multipart-encoded body.
    """
    boundary = bytes(_BOUNDARY, self.encoding)
    hder_format = 'Content-Disposition: form-data; name="{}"'
    hder_format_io = '; filename="{}"'
    multip_pkg = b''
    num_of_parts = len(files_dict)
    for index, kv in enumerate(files_dict.items(), start=1):
        # Each part opens with the boundary delimiter.
        multip_pkg += (b'--' + boundary + b'\r\n')
        k, v = kv
        try:
            # File path case: read the file and attach filename + MIME type.
            pkg_body = await self._file_manager(v)
            multip_pkg += bytes(hder_format.format(k) +
                                hder_format_io.format(basename(v)),
                                self.encoding)
            mime_type = mimetypes.guess_type(basename(v))
            # NOTE(review): this tests guess_type()'s *encoding* slot
            # (index 1), so a detected type with no encoding still falls
            # back to octet-stream — looks suspicious; confirm upstream.
            if not mime_type[1]:
                mime_type = 'application/octet-stream'
            else:
                mime_type = '/'.join(mime_type)
            multip_pkg += bytes('; Content-Type: ' + mime_type,
                                self.encoding)
            multip_pkg += b'\r\n'*2 + pkg_body
        except (TypeError, FileNotFoundError):
            # Plain value case: _file_manager rejected v, send it verbatim.
            pkg_body = bytes(v, self.encoding) + b'\r\n'
            multip_pkg += bytes(hder_format.format(k) + '\r\n'*2,
                                self.encoding)
            multip_pkg += pkg_body
        if index == num_of_parts:
            # Final part gets the closing boundary.
            multip_pkg += b'--' + boundary + b'--\r\n'
    return multip_pkg
367,728
Takes a package and body, combines them, then shoots 'em off in to the ether. Args: package (list of str): The header package. body (str): The str representation of the body.
async def _send(self, request_bytes, body_bytes, h11_connection):
    """Serialize and transmit the request header, optional body, and EOM.

    Args:
        request_bytes: the h11 request event (header package).
        body_bytes: the h11 body event, or None when there is no body.
        h11_connection: the h11 connection used to serialize events.
    """
    await self.sock.send_all(h11_connection.send(request_bytes))
    if body_bytes is not None:
        await self.sock.send_all(h11_connection.send(body_bytes))
    # EndOfMessage terminates the HTTP message on the wire.
    eom = h11.EndOfMessage()
    await self.sock.send_all(h11_connection.send(eom))
367,732
Creates a normal async socket, returns it. Args: location (tuple(str, int)): A tuple of net location (eg '127.0.0.1' or 'example.org') and port (eg 80 or 25000).
async def _open_connection_http(self, location):
    """Open a plain TCP socket to ``location`` (host, port) and mark it active."""
    host, port = location[0], location[1]
    sock = await connect_tcp(host,
                             port,
                             bind_host=self.source_address)
    sock._active = True
    return sock
367,740
Creates an async SSL socket, returns it. Args: location (tuple(str, int)): A tuple of net location (eg '127.0.0.1' or 'example.org') and port (eg 80 or 25000).
async def _open_connection_https(self, location):
    """Open a TLS-wrapped socket to ``location`` (host, port) and mark it active."""
    host, port = location[0], location[1]
    # Fall back to a default SSLContext when the session has none configured.
    context = self.ssl_context or ssl.SSLContext()
    sock = await connect_tcp(host,
                             port,
                             ssl_context=context,
                             bind_host=self.source_address,
                             autostart_tls=True)
    sock._active = True
    return sock
367,741
Gets a place's photo by reference. See detailed documentation at https://developers.google.com/places/documentation/photos Arguments: photoreference -- The unique Google reference for the required photo. Keyword arguments: maxheight -- The maximum desired photo height in pixels maxwidth -- The maximum desired photo width in pixels You must specify one of these keyword arguments. Acceptable value is an integer between 1 and 1600.
def _get_place_photo(photoreference, api_key, maxheight=None, maxwidth=None, sensor=False):
    """Fetch a place's photo by its unique Google reference.

    Args:
        photoreference: the unique Google reference for the photo.
        api_key: Google Places API key.
        maxheight: maximum desired photo height in pixels (1-1600).
        maxwidth: maximum desired photo width in pixels (1-1600).
        sensor: sensor flag forwarded as a lowercase string.

    One of maxheight/maxwidth must be specified.
    """
    params = {'photoreference': photoreference,
              'sensor': str(sensor).lower(),
              'key': api_key}
    # Only send the size constraints the caller actually provided.
    for name, value in (('maxheight', maxheight), ('maxwidth', maxwidth)):
        if value:
            params[name] = value
    return _fetch_remote_file(GooglePlaces.PHOTO_API_URL, params)
368,360
Find the differences between two texts. Assumes that the texts do not have any common prefix or suffix. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Speedup flag. If false, then don't run a line-level diff first to identify the changed areas. If true, then run a faster, slightly less optimal diff. deadline: Time when the diff should be complete by. Returns: Array of changes.
def diff_compute(self, text1, text2, checklines, deadline):
    """Find the differences between two texts with no common prefix/suffix.

    Args:
        text1: old string to be diffed.
        text2: new string to be diffed.
        checklines: speedup flag; if True run a faster, slightly less
            optimal line-level pre-pass on large inputs.
        deadline: time when the diff should be complete by.

    Returns:
        Array of (op, text) change tuples.
    """
    if not text1:
        # Just add some text (speedup).
        return [(self.DIFF_INSERT, text2)]

    if not text2:
        # Just delete some text (speedup).
        return [(self.DIFF_DELETE, text1)]

    if len(text1) > len(text2):
        (longtext, shorttext) = (text1, text2)
    else:
        (shorttext, longtext) = (text1, text2)
    i = longtext.find(shorttext)
    if i != -1:
        # Shorter text is inside the longer text (speedup).
        diffs = [(self.DIFF_INSERT, longtext[:i]),
                 (self.DIFF_EQUAL, shorttext),
                 (self.DIFF_INSERT, longtext[i + len(shorttext):])]
        # Swap insertions for deletions if diff is reversed.
        if len(text1) > len(text2):
            diffs[0] = (self.DIFF_DELETE, diffs[0][1])
            diffs[2] = (self.DIFF_DELETE, diffs[2][1])
        return diffs

    if len(shorttext) == 1:
        # Single character string.
        # After the previous speedup, the character can't be an equality.
        return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

    # Check to see if the problem can be split in two.
    hm = self.diff_halfMatch(text1, text2)
    if hm:
        # A half-match was found, sort out the return data.
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
        # Send both pairs off for separate processing.
        diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
        diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
        # Merge the results.
        return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b

    if checklines and len(text1) > 100 and len(text2) > 100:
        # Large inputs: line-level pre-pass trades optimality for speed.
        return self.diff_lineMode(text1, text2, deadline)

    return self.diff_bisect(text1, text2, deadline)
368,588
Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes.
def diff_lineMode(self, text1, text2, deadline):
    """Line-level diff first, then rediff the changed parts char-by-char.

    This speedup can produce non-minimal diffs.

    Args:
        text1: old string to be diffed.
        text2: new string to be diffed.
        deadline: time when the diff should be complete by.

    Returns:
        Array of (op, text) change tuples.
    """
    # Scan the text on a line-by-line basis first.
    (text1, text2, linearray) = self.diff_linesToChars(text1, text2)

    diffs = self.diff_main(text1, text2, False, deadline)

    # Convert the diff back to original text.
    self.diff_charsToLines(diffs, linearray)
    # Eliminate freak matches (e.g. blank lines)
    self.diff_cleanupSemantic(diffs)

    # Rediff any replacement blocks, this time character-by-character.
    # Add a dummy entry at the end.
    diffs.append((self.DIFF_EQUAL, ''))
    pointer = 0
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
        if diffs[pointer][0] == self.DIFF_INSERT:
            count_insert += 1
            text_insert += diffs[pointer][1]
        elif diffs[pointer][0] == self.DIFF_DELETE:
            count_delete += 1
            text_delete += diffs[pointer][1]
        elif diffs[pointer][0] == self.DIFF_EQUAL:
            # Upon reaching an equality, check for prior redundancies.
            if count_delete >= 1 and count_insert >= 1:
                # Delete the offending records and add the merged ones.
                subDiff = self.diff_main(text_delete, text_insert, False,
                                         deadline)
                diffs[pointer - count_delete - count_insert : pointer] = subDiff
                pointer = pointer - count_delete - count_insert + len(subDiff)
            count_insert = 0
            count_delete = 0
            text_delete = ''
            text_insert = ''

        pointer += 1

    diffs.pop()  # Remove the dummy entry at the end.

    return diffs
368,589
Find the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time at which to bail if not yet complete. Returns: Array of diff tuples.
def diff_bisect(self, text1, text2, deadline):
    """Find the 'middle snake', split the problem in two, recurse.

    See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.

    Args:
        text1: old string to be diffed.
        text2: new string to be diffed.
        deadline: time at which to bail if not yet complete.

    Returns:
        Array of diff tuples.
    """
    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    max_d = (text1_length + text2_length + 1) // 2
    v_offset = max_d
    v_length = 2 * max_d
    v1 = [-1] * v_length
    v1[v_offset + 1] = 0
    v2 = v1[:]
    delta = text1_length - text2_length
    # If the total number of characters is odd, then the front path will
    # collide with the reverse path.
    front = (delta % 2 != 0)
    # Offsets for start and end of k loop.
    # Prevents mapping of space beyond the grid.
    k1start = 0
    k1end = 0
    k2start = 0
    k2end = 0
    for d in range(max_d):
        # Bail out if deadline is reached.
        if time.time() > deadline:
            break

        # Walk the front path one step.
        for k1 in range(-d + k1start, d + 1 - k1end, 2):
            k1_offset = v_offset + k1
            if k1 == -d or (k1 != d and
                            v1[k1_offset - 1] < v1[k1_offset + 1]):
                x1 = v1[k1_offset + 1]
            else:
                x1 = v1[k1_offset - 1] + 1
            y1 = x1 - k1
            # Extend the snake: follow the diagonal of matching chars.
            while (x1 < text1_length and y1 < text2_length and
                   text1[x1] == text2[y1]):
                x1 += 1
                y1 += 1
            v1[k1_offset] = x1
            if x1 > text1_length:
                # Ran off the right of the graph.
                k1end += 2
            elif y1 > text2_length:
                # Ran off the bottom of the graph.
                k1start += 2
            elif front:
                k2_offset = v_offset + delta - k1
                if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
                    # Mirror x2 onto top-left coordinate system.
                    x2 = text1_length - v2[k2_offset]
                    if x1 >= x2:
                        # Overlap detected.
                        return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

        # Walk the reverse path one step.
        for k2 in range(-d + k2start, d + 1 - k2end, 2):
            k2_offset = v_offset + k2
            if k2 == -d or (k2 != d and
                            v2[k2_offset - 1] < v2[k2_offset + 1]):
                x2 = v2[k2_offset + 1]
            else:
                x2 = v2[k2_offset - 1] + 1
            y2 = x2 - k2
            # Extend the reverse snake over matching suffix characters.
            while (x2 < text1_length and y2 < text2_length and
                   text1[-x2 - 1] == text2[-y2 - 1]):
                x2 += 1
                y2 += 1
            v2[k2_offset] = x2
            if x2 > text1_length:
                # Ran off the left of the graph.
                k2end += 2
            elif y2 > text2_length:
                # Ran off the top of the graph.
                k2start += 2
            elif not front:
                k1_offset = v_offset + delta - k2
                if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
                    x1 = v1[k1_offset]
                    y1 = v_offset + x1 - k1_offset
                    # Mirror x2 onto top-left coordinate system.
                    x2 = text1_length - x2
                    if x1 >= x2:
                        # Overlap detected.
                        return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

    # Diff took too long and hit the deadline or
    # number of diffs equals number of characters, no commonality at all.
    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
368,590