repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
jazzband/django-model-utils
model_utils/managers.py
SoftDeletableManagerMixin.get_queryset
def get_queryset(self): """ Return queryset limited to not removed entries. """ kwargs = {'model': self.model, 'using': self._db} if hasattr(self, '_hints'): kwargs['hints'] = self._hints return self._queryset_class(**kwargs).filter(is_removed=False)
python
def get_queryset(self):
    """
    Return queryset limited to not removed entries.
    """
    qs_kwargs = {'model': self.model, 'using': self._db}
    # Newer Django manager APIs thread query hints through `_hints`;
    # forward them when present.
    if hasattr(self, '_hints'):
        qs_kwargs['hints'] = self._hints
    queryset = self._queryset_class(**qs_kwargs)
    # Hide soft-deleted rows from every default lookup.
    return queryset.filter(is_removed=False)
[ "def", "get_queryset", "(", "self", ")", ":", "kwargs", "=", "{", "'model'", ":", "self", ".", "model", ",", "'using'", ":", "self", ".", "_db", "}", "if", "hasattr", "(", "self", ",", "'_hints'", ")", ":", "kwargs", "[", "'hints'", "]", "=", "self...
Return queryset limited to not removed entries.
[ "Return", "queryset", "limited", "to", "not", "removed", "entries", "." ]
d557c4253312774a7c2f14bcd02675e9ac2ea05f
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/managers.py#L295-L303
train
222,000
jazzband/django-model-utils
model_utils/tracker.py
FieldInstanceTracker.previous
def previous(self, field): """Returns currently saved value of given field""" # handle deferred fields that have not yet been loaded from the database if self.instance.pk and field in self.deferred_fields and field not in self.saved_data: # if the field has not been assigned locally, simply fetch and un-defer the value if field not in self.instance.__dict__: self.get_field_value(field) # if the field has been assigned locally, store the local value, fetch the database value, # store database value to saved_data, and restore the local value else: current_value = self.get_field_value(field) self.instance.refresh_from_db(fields=[field]) self.saved_data[field] = deepcopy(self.get_field_value(field)) setattr(self.instance, self.field_map[field], current_value) return self.saved_data.get(field)
python
def previous(self, field): """Returns currently saved value of given field""" # handle deferred fields that have not yet been loaded from the database if self.instance.pk and field in self.deferred_fields and field not in self.saved_data: # if the field has not been assigned locally, simply fetch and un-defer the value if field not in self.instance.__dict__: self.get_field_value(field) # if the field has been assigned locally, store the local value, fetch the database value, # store database value to saved_data, and restore the local value else: current_value = self.get_field_value(field) self.instance.refresh_from_db(fields=[field]) self.saved_data[field] = deepcopy(self.get_field_value(field)) setattr(self.instance, self.field_map[field], current_value) return self.saved_data.get(field)
[ "def", "previous", "(", "self", ",", "field", ")", ":", "# handle deferred fields that have not yet been loaded from the database", "if", "self", ".", "instance", ".", "pk", "and", "field", "in", "self", ".", "deferred_fields", "and", "field", "not", "in", "self", ...
Returns currently saved value of given field
[ "Returns", "currently", "saved", "value", "of", "given", "field" ]
d557c4253312774a7c2f14bcd02675e9ac2ea05f
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/tracker.py#L142-L160
train
222,001
jazzband/django-model-utils
model_utils/tracker.py
FieldTracker.get_field_map
def get_field_map(self, cls): """Returns dict mapping fields names to model attribute names""" field_map = dict((field, field) for field in self.fields) all_fields = dict((f.name, f.attname) for f in cls._meta.fields) field_map.update(**dict((k, v) for (k, v) in all_fields.items() if k in field_map)) return field_map
python
def get_field_map(self, cls):
    """Returns dict mapping fields names to model attribute names

    Tracked fields default to mapping onto themselves; fields the model
    actually declares are remapped to the model's attribute name
    (e.g. a ForeignKey 'user' maps to 'user_id').
    """
    # Start with the identity mapping for every tracked field...
    field_map = {field: field for field in self.fields}
    # ...then override with the model's real attribute names where known.
    all_fields = {f.name: f.attname for f in cls._meta.fields}
    field_map.update(
        (k, v) for k, v in all_fields.items() if k in field_map
    )
    return field_map
[ "def", "get_field_map", "(", "self", ",", "cls", ")", ":", "field_map", "=", "dict", "(", "(", "field", ",", "field", ")", "for", "field", "in", "self", ".", "fields", ")", "all_fields", "=", "dict", "(", "(", "f", ".", "name", ",", "f", ".", "at...
Returns dict mapping fields names to model attribute names
[ "Returns", "dict", "mapping", "fields", "names", "to", "model", "attribute", "names" ]
d557c4253312774a7c2f14bcd02675e9ac2ea05f
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/tracker.py#L202-L208
train
222,002
jazzband/django-model-utils
model_utils/models.py
add_status_query_managers
def add_status_query_managers(sender, **kwargs): """ Add a Querymanager for each status item dynamically. """ if not issubclass(sender, StatusModel): return if django.VERSION >= (1, 10): # First, get current manager name... default_manager = sender._meta.default_manager for value, display in getattr(sender, 'STATUS', ()): if _field_exists(sender, value): raise ImproperlyConfigured( "StatusModel: Model '%s' has a field named '%s' which " "conflicts with a status of the same name." % (sender.__name__, value) ) sender.add_to_class(value, QueryManager(status=value)) if django.VERSION >= (1, 10): # ...then, put it back, as add_to_class is modifying the default manager! sender._meta.default_manager_name = default_manager.name
python
def add_status_query_managers(sender, **kwargs):
    """
    Add a Querymanager for each status item dynamically.
    """
    if not issubclass(sender, StatusModel):
        return

    needs_manager_restore = django.VERSION >= (1, 10)
    if needs_manager_restore:
        # First, get current manager name...
        default_manager = sender._meta.default_manager

    for status_value, _label in getattr(sender, 'STATUS', ()):
        # A manager must not shadow an existing model field.
        if _field_exists(sender, status_value):
            raise ImproperlyConfigured(
                "StatusModel: Model '%s' has a field named '%s' which "
                "conflicts with a status of the same name."
                % (sender.__name__, status_value)
            )
        sender.add_to_class(status_value, QueryManager(status=status_value))

    if needs_manager_restore:
        # ...then, put it back, as add_to_class is modifying the default manager!
        sender._meta.default_manager_name = default_manager.name
[ "def", "add_status_query_managers", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "if", "not", "issubclass", "(", "sender", ",", "StatusModel", ")", ":", "return", "if", "django", ".", "VERSION", ">=", "(", "1", ",", "10", ")", ":", "# First, get cur...
Add a Querymanager for each status item dynamically.
[ "Add", "a", "Querymanager", "for", "each", "status", "item", "dynamically", "." ]
d557c4253312774a7c2f14bcd02675e9ac2ea05f
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/models.py#L60-L83
train
222,003
jazzband/django-model-utils
model_utils/models.py
add_timeframed_query_manager
def add_timeframed_query_manager(sender, **kwargs): """ Add a QueryManager for a specific timeframe. """ if not issubclass(sender, TimeFramedModel): return if _field_exists(sender, 'timeframed'): raise ImproperlyConfigured( "Model '%s' has a field named 'timeframed' " "which conflicts with the TimeFramedModel manager." % sender.__name__ ) sender.add_to_class('timeframed', QueryManager( (models.Q(start__lte=now) | models.Q(start__isnull=True)) & (models.Q(end__gte=now) | models.Q(end__isnull=True)) ))
python
def add_timeframed_query_manager(sender, **kwargs):
    """
    Add a QueryManager for a specific timeframe.
    """
    if not issubclass(sender, TimeFramedModel):
        return
    if _field_exists(sender, 'timeframed'):
        raise ImproperlyConfigured(
            "Model '%s' has a field named 'timeframed' "
            "which conflicts with the TimeFramedModel manager."
            % sender.__name__
        )
    # Active == already started (or no start set) AND not yet ended
    # (or no end set).
    started = models.Q(start__lte=now) | models.Q(start__isnull=True)
    not_ended = models.Q(end__gte=now) | models.Q(end__isnull=True)
    sender.add_to_class('timeframed', QueryManager(started & not_ended))
[ "def", "add_timeframed_query_manager", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "if", "not", "issubclass", "(", "sender", ",", "TimeFramedModel", ")", ":", "return", "if", "_field_exists", "(", "sender", ",", "'timeframed'", ")", ":", "raise", "Impr...
Add a QueryManager for a specific timeframe.
[ "Add", "a", "QueryManager", "for", "a", "specific", "timeframe", "." ]
d557c4253312774a7c2f14bcd02675e9ac2ea05f
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/models.py#L86-L102
train
222,004
invoice-x/invoice2data
src/invoice2data/input/tesseract4.py
to_text
def to_text(path, language='fra'): """Wraps Tesseract 4 OCR with custom language model. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format """ import subprocess from distutils import spawn import tempfile import time # Check for dependencies. Needs Tesseract and Imagemagick installed. if not spawn.find_executable('tesseract'): raise EnvironmentError('tesseract not installed.') if not spawn.find_executable('convert'): raise EnvironmentError('imagemagick not installed.') if not spawn.find_executable('gs'): raise EnvironmentError('ghostscript not installed.') with tempfile.NamedTemporaryFile(suffix='.tiff') as tf: # Step 1: Convert to TIFF gs_cmd = [ 'gs', '-q', '-dNOPAUSE', '-r600x600', '-sDEVICE=tiff24nc', '-sOutputFile=' + tf.name, path, '-c', 'quit', ] subprocess.Popen(gs_cmd) time.sleep(3) # Step 2: Enhance TIFF magick_cmd = [ 'convert', tf.name, '-colorspace', 'gray', '-type', 'grayscale', '-contrast-stretch', '0', '-sharpen', '0x1', 'tiff:-', ] p1 = subprocess.Popen(magick_cmd, stdout=subprocess.PIPE) tess_cmd = ['tesseract', '-l', language, '--oem', '1', '--psm', '3', 'stdin', 'stdout'] p2 = subprocess.Popen(tess_cmd, stdin=p1.stdout, stdout=subprocess.PIPE) out, err = p2.communicate() extracted_str = out return extracted_str
python
def to_text(path, language='fra'):
    """Wraps Tesseract 4 OCR with custom language model.

    Parameters
    ----------
    path : str
        path of electronic invoice in JPG or PNG format
    language : str
        Tesseract language/model code, defaults to 'fra'

    Returns
    -------
    extracted_str : bytes
        returns extracted text from image in JPG or PNG format

    Raises
    ------
    EnvironmentError
        if tesseract, imagemagick or ghostscript is not installed
    """
    import subprocess
    from distutils import spawn
    import tempfile

    # Check for dependencies. Needs Tesseract and Imagemagick installed.
    if not spawn.find_executable('tesseract'):
        raise EnvironmentError('tesseract not installed.')
    if not spawn.find_executable('convert'):
        raise EnvironmentError('imagemagick not installed.')
    if not spawn.find_executable('gs'):
        raise EnvironmentError('ghostscript not installed.')

    with tempfile.NamedTemporaryFile(suffix='.tiff') as tf:
        # Step 1: Convert to TIFF
        gs_cmd = [
            'gs',
            '-q',
            '-dNOPAUSE',
            '-r600x600',
            '-sDEVICE=tiff24nc',
            '-sOutputFile=' + tf.name,
            path,
            '-c',
            'quit',
        ]
        # Bug fix: wait for ghostscript to actually finish instead of
        # sleeping an arbitrary 3 seconds, which raced against slow
        # conversions and could hand an incomplete TIFF to the next step.
        subprocess.Popen(gs_cmd).wait()

        # Step 2: Enhance TIFF
        magick_cmd = [
            'convert',
            tf.name,
            '-colorspace', 'gray',
            '-type', 'grayscale',
            '-contrast-stretch', '0',
            '-sharpen', '0x1',
            'tiff:-',
        ]
        p1 = subprocess.Popen(magick_cmd, stdout=subprocess.PIPE)

        # Step 3: OCR the enhanced image streamed on stdin
        tess_cmd = ['tesseract', '-l', language, '--oem', '1', '--psm', '3',
                    'stdin', 'stdout']
        p2 = subprocess.Popen(tess_cmd, stdin=p1.stdout, stdout=subprocess.PIPE)
        # Close our copy of the pipe so convert receives SIGPIPE if
        # tesseract exits early (standard shell-pipeline replacement idiom).
        p1.stdout.close()
        out, err = p2.communicate()
    extracted_str = out
    return extracted_str
[ "def", "to_text", "(", "path", ",", "language", "=", "'fra'", ")", ":", "import", "subprocess", "from", "distutils", "import", "spawn", "import", "tempfile", "import", "time", "# Check for dependencies. Needs Tesseract and Imagemagick installed.", "if", "not", "spawn", ...
Wraps Tesseract 4 OCR with custom language model. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format
[ "Wraps", "Tesseract", "4", "OCR", "with", "custom", "language", "model", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/tesseract4.py#L2-L69
train
222,005
invoice-x/invoice2data
src/invoice2data/input/gvision.py
to_text
def to_text(path, bucket_name='cloud-vision-84893', language='fr'): """Sends PDF files to Google Cloud Vision for OCR. Before using invoice2data, make sure you have the auth json path set as env var GOOGLE_APPLICATION_CREDENTIALS Parameters ---------- path : str path of electronic invoice in JPG or PNG format bucket_name : str name of bucket to use for file storage and results cache. Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format """ """OCR with PDF/TIFF as source files on GCS""" import os from google.cloud import vision from google.cloud import storage from google.protobuf import json_format # Supported mime_types are: 'application/pdf' and 'image/tiff' mime_type = 'application/pdf' path_dir, filename = os.path.split(path) result_blob_basename = filename.replace('.pdf', '').replace('.PDF', '') result_blob_name = result_blob_basename + '/output-1-to-1.json' result_blob_uri = 'gs://{}/{}/'.format(bucket_name, result_blob_basename) input_blob_uri = 'gs://{}/{}'.format(bucket_name, filename) # Upload file to gcloud if it doesn't exist yet storage_client = storage.Client() bucket = storage_client.get_bucket(bucket_name) if bucket.get_blob(filename) is None: blob = bucket.blob(filename) blob.upload_from_filename(path) # See if result already exists # TODO: upload as hash, not filename result_blob = bucket.get_blob(result_blob_name) if result_blob is None: # How many pages should be grouped into each json output file. 
batch_size = 10 client = vision.ImageAnnotatorClient() feature = vision.types.Feature(type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION) gcs_source = vision.types.GcsSource(uri=input_blob_uri) input_config = vision.types.InputConfig(gcs_source=gcs_source, mime_type=mime_type) gcs_destination = vision.types.GcsDestination(uri=result_blob_uri) output_config = vision.types.OutputConfig( gcs_destination=gcs_destination, batch_size=batch_size ) async_request = vision.types.AsyncAnnotateFileRequest( features=[feature], input_config=input_config, output_config=output_config ) operation = client.async_batch_annotate_files(requests=[async_request]) print('Waiting for the operation to finish.') operation.result(timeout=180) # Get result after OCR is completed result_blob = bucket.get_blob(result_blob_name) json_string = result_blob.download_as_string() response = json_format.Parse(json_string, vision.types.AnnotateFileResponse()) # The actual response for the first page of the input file. first_page_response = response.responses[0] annotation = first_page_response.full_text_annotation return annotation.text.encode('utf-8')
python
def to_text(path, bucket_name='cloud-vision-84893', language='fr'):
    """Sends PDF files to Google Cloud Vision for OCR.

    OCR with PDF/TIFF as source files on GCS.

    Before using invoice2data, make sure you have the auth json path set as
    env var GOOGLE_APPLICATION_CREDENTIALS

    Parameters
    ----------
    path : str
        path of electronic invoice in JPG or PNG format
    bucket_name : str
        name of bucket to use for file storage and results cache.
    language : str
        currently unused; kept for interface compatibility with the other
        input modules.

    Returns
    -------
    extracted_str : bytes
        returns extracted text from image in JPG or PNG format
    """
    # Bug fix: the second, stray string literal that used to follow the
    # docstring (a dead expression statement) is merged into the docstring.
    import os
    from google.cloud import vision
    from google.cloud import storage
    from google.protobuf import json_format

    # Supported mime_types are: 'application/pdf' and 'image/tiff'
    mime_type = 'application/pdf'

    path_dir, filename = os.path.split(path)
    result_blob_basename = filename.replace('.pdf', '').replace('.PDF', '')
    result_blob_name = result_blob_basename + '/output-1-to-1.json'
    result_blob_uri = 'gs://{}/{}/'.format(bucket_name, result_blob_basename)
    input_blob_uri = 'gs://{}/{}'.format(bucket_name, filename)

    # Upload file to gcloud if it doesn't exist yet
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    if bucket.get_blob(filename) is None:
        blob = bucket.blob(filename)
        blob.upload_from_filename(path)

    # See if result already exists
    # TODO: upload as hash, not filename
    result_blob = bucket.get_blob(result_blob_name)
    if result_blob is None:
        # How many pages should be grouped into each json output file.
        batch_size = 10
        client = vision.ImageAnnotatorClient()
        feature = vision.types.Feature(
            type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION
        )
        gcs_source = vision.types.GcsSource(uri=input_blob_uri)
        input_config = vision.types.InputConfig(
            gcs_source=gcs_source, mime_type=mime_type
        )
        gcs_destination = vision.types.GcsDestination(uri=result_blob_uri)
        output_config = vision.types.OutputConfig(
            gcs_destination=gcs_destination, batch_size=batch_size
        )
        async_request = vision.types.AsyncAnnotateFileRequest(
            features=[feature],
            input_config=input_config,
            output_config=output_config,
        )
        operation = client.async_batch_annotate_files(requests=[async_request])
        print('Waiting for the operation to finish.')
        operation.result(timeout=180)

    # Get result after OCR is completed
    result_blob = bucket.get_blob(result_blob_name)
    json_string = result_blob.download_as_string()
    response = json_format.Parse(
        json_string, vision.types.AnnotateFileResponse()
    )

    # The actual response for the first page of the input file.
    first_page_response = response.responses[0]
    annotation = first_page_response.full_text_annotation
    return annotation.text.encode('utf-8')
[ "def", "to_text", "(", "path", ",", "bucket_name", "=", "'cloud-vision-84893'", ",", "language", "=", "'fr'", ")", ":", "\"\"\"OCR with PDF/TIFF as source files on GCS\"\"\"", "import", "os", "from", "google", ".", "cloud", "import", "vision", "from", "google", ".",...
Sends PDF files to Google Cloud Vision for OCR. Before using invoice2data, make sure you have the auth json path set as env var GOOGLE_APPLICATION_CREDENTIALS Parameters ---------- path : str path of electronic invoice in JPG or PNG format bucket_name : str name of bucket to use for file storage and results cache. Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format
[ "Sends", "PDF", "files", "to", "Google", "Cloud", "Vision", "for", "OCR", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/gvision.py#L2-L83
train
222,006
invoice-x/invoice2data
src/invoice2data/output/to_csv.py
write_to_file
def write_to_file(data, path): """Export extracted fields to csv Appends .csv to path if missing and generates csv file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated csv file Notes ---- Do give file name to the function parameter path. Examples -------- >>> from invoice2data.output import to_csv >>> to_csv.write_to_file(data, "/exported_csv/invoice.csv") >>> to_csv.write_to_file(data, "invoice.csv") """ if path.endswith('.csv'): filename = path else: filename = path + '.csv' if sys.version_info[0] < 3: openfile = open(filename, "wb") else: openfile = open(filename, "w", newline='') with openfile as csv_file: writer = csv.writer(csv_file, delimiter=',') for line in data: first_row = [] for k, v in line.items(): first_row.append(k) writer.writerow(first_row) for line in data: csv_items = [] for k, v in line.items(): # first_row.append(k) if k == 'date': v = v.strftime('%d/%m/%Y') csv_items.append(v) writer.writerow(csv_items)
python
def write_to_file(data, path):
    """Export extracted fields to csv

    Appends .csv to path if missing and generates csv file in specified
    directory, if not then in root

    Parameters
    ----------
    data : list of dict
        extracted fields, one dict per invoice
    path : str
        directory to save generated csv file

    Notes
    ----
    Do give file name to the function parameter path.

    Examples
    --------
    >>> from invoice2data.output import to_csv
    >>> to_csv.write_to_file(data, "/exported_csv/invoice.csv")
    >>> to_csv.write_to_file(data, "invoice.csv")
    """
    if path.endswith('.csv'):
        filename = path
    else:
        filename = path + '.csv'

    # py2 needs binary mode; py3 needs newline='' so the csv module
    # controls line endings itself.
    if sys.version_info[0] < 3:
        openfile = open(filename, "wb")
    else:
        openfile = open(filename, "w", newline='')

    with openfile as csv_file:
        writer = csv.writer(csv_file, delimiter=',')

        # Bug fix: the header row used to be written once per data row;
        # emit it exactly once, using the keys of the first record.
        if data:
            writer.writerow(list(data[0].keys()))

        for line in data:
            csv_items = []
            for k, v in line.items():
                # Render dates in a fixed, locale-independent format.
                if k == 'date':
                    v = v.strftime('%d/%m/%Y')
                csv_items.append(v)
            writer.writerow(csv_items)
[ "def", "write_to_file", "(", "data", ",", "path", ")", ":", "if", "path", ".", "endswith", "(", "'.csv'", ")", ":", "filename", "=", "path", "else", ":", "filename", "=", "path", "+", "'.csv'", "if", "sys", ".", "version_info", "[", "0", "]", "<", ...
Export extracted fields to csv Appends .csv to path if missing and generates csv file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated csv file Notes ---- Do give file name to the function parameter path. Examples -------- >>> from invoice2data.output import to_csv >>> to_csv.write_to_file(data, "/exported_csv/invoice.csv") >>> to_csv.write_to_file(data, "invoice.csv")
[ "Export", "extracted", "fields", "to", "csv" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/output/to_csv.py#L5-L54
train
222,007
invoice-x/invoice2data
src/invoice2data/input/tesseract.py
to_text
def to_text(path): """Wraps Tesseract OCR. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format """ import subprocess from distutils import spawn # Check for dependencies. Needs Tesseract and Imagemagick installed. if not spawn.find_executable('tesseract'): raise EnvironmentError('tesseract not installed.') if not spawn.find_executable('convert'): raise EnvironmentError('imagemagick not installed.') # convert = "convert -density 350 %s -depth 8 tiff:-" % (path) convert = ['convert', '-density', '350', path, '-depth', '8', 'png:-'] p1 = subprocess.Popen(convert, stdout=subprocess.PIPE) tess = ['tesseract', 'stdin', 'stdout'] p2 = subprocess.Popen(tess, stdin=p1.stdout, stdout=subprocess.PIPE) out, err = p2.communicate() extracted_str = out return extracted_str
python
def to_text(path):
    """Wraps Tesseract OCR.

    Parameters
    ----------
    path : str
        path of electronic invoice in JPG or PNG format

    Returns
    -------
    extracted_str : bytes
        returns extracted text from image in JPG or PNG format

    Raises
    ------
    EnvironmentError
        if tesseract or imagemagick is not installed
    """
    import subprocess
    from distutils import spawn

    # Check for dependencies. Needs Tesseract and Imagemagick installed.
    if not spawn.find_executable('tesseract'):
        raise EnvironmentError('tesseract not installed.')
    if not spawn.find_executable('convert'):
        raise EnvironmentError('imagemagick not installed.')

    # convert = "convert -density 350 %s -depth 8 tiff:-" % (path)
    convert = ['convert', '-density', '350', path, '-depth', '8', 'png:-']
    p1 = subprocess.Popen(convert, stdout=subprocess.PIPE)
    tess = ['tesseract', 'stdin', 'stdout']
    p2 = subprocess.Popen(tess, stdin=p1.stdout, stdout=subprocess.PIPE)
    # Bug fix: close our copy of the pipe so convert receives SIGPIPE if
    # tesseract exits early, preventing a potential pipeline deadlock.
    p1.stdout.close()
    out, err = p2.communicate()
    extracted_str = out
    return extracted_str
[ "def", "to_text", "(", "path", ")", ":", "import", "subprocess", "from", "distutils", "import", "spawn", "# Check for dependencies. Needs Tesseract and Imagemagick installed.", "if", "not", "spawn", ".", "find_executable", "(", "'tesseract'", ")", ":", "raise", "Environ...
Wraps Tesseract OCR. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format
[ "Wraps", "Tesseract", "OCR", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/tesseract.py#L4-L38
train
222,008
invoice-x/invoice2data
src/invoice2data/extract/plugins/tables.py
extract
def extract(self, content, output): """Try to extract tables from an invoice""" for table in self['tables']: # First apply default options. plugin_settings = DEFAULT_OPTIONS.copy() plugin_settings.update(table) table = plugin_settings # Validate settings assert 'start' in table, 'Table start regex missing' assert 'end' in table, 'Table end regex missing' assert 'body' in table, 'Table body regex missing' start = re.search(table['start'], content) end = re.search(table['end'], content) if not start or not end: logger.warning('no table body found - start %s, end %s', start, end) continue table_body = content[start.end(): end.start()] for line in re.split(table['line_separator'], table_body): # if the line has empty lines in it , skip them if not line.strip('').strip('\n') or not line: continue match = re.search(table['body'], line) if match: for field, value in match.groupdict().items(): # If a field name already exists, do not overwrite it if field in output: continue if field.startswith('date') or field.endswith('date'): output[field] = self.parse_date(value) if not output[field]: logger.error("Date parsing failed on date '%s'", value) return None elif field.startswith('amount'): output[field] = self.parse_number(value) else: output[field] = value logger.debug('ignoring *%s* because it doesn\'t match anything', line)
python
def extract(self, content, output):
    """Try to extract tables from an invoice

    Each entry in self['tables'] defines 'start'/'end' regexes delimiting
    the table body and a 'body' regex with named groups; matched groups are
    written into `output` (dates parsed, amounts converted to numbers).
    Returns None on a date-parse failure, otherwise returns implicitly.
    """
    for table in self['tables']:
        # First apply default options.
        plugin_settings = DEFAULT_OPTIONS.copy()
        plugin_settings.update(table)
        table = plugin_settings

        # Validate settings
        # NOTE(review): asserts are stripped under -O; raising would be
        # safer for template validation — confirm intended.
        assert 'start' in table, 'Table start regex missing'
        assert 'end' in table, 'Table end regex missing'
        assert 'body' in table, 'Table body regex missing'

        start = re.search(table['start'], content)
        end = re.search(table['end'], content)
        if not start or not end:
            logger.warning('no table body found - start %s, end %s', start, end)
            continue

        # Slice out the text strictly between the start and end markers.
        table_body = content[start.end(): end.start()]
        for line in re.split(table['line_separator'], table_body):
            # Skip blank/whitespace-only lines.
            if not line.strip('').strip('\n') or not line:
                continue
            match = re.search(table['body'], line)
            if match:
                for field, value in match.groupdict().items():
                    # If a field name already exists, do not overwrite it
                    if field in output:
                        continue
                    if field.startswith('date') or field.endswith('date'):
                        output[field] = self.parse_date(value)
                        if not output[field]:
                            logger.error("Date parsing failed on date '%s'", value)
                            return None
                    elif field.startswith('amount'):
                        output[field] = self.parse_number(value)
                    else:
                        output[field] = value
            # NOTE(review): this debug message fires for every non-blank
            # line, including ones that matched — confirm it was not meant
            # to sit in an `else` branch of the match check.
            logger.debug('ignoring *%s* because it doesn\'t match anything', line)
[ "def", "extract", "(", "self", ",", "content", ",", "output", ")", ":", "for", "table", "in", "self", "[", "'tables'", "]", ":", "# First apply default options.", "plugin_settings", "=", "DEFAULT_OPTIONS", ".", "copy", "(", ")", "plugin_settings", ".", "update...
Try to extract tables from an invoice
[ "Try", "to", "extract", "tables", "from", "an", "invoice" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/extract/plugins/tables.py#L11-L56
train
222,009
invoice-x/invoice2data
src/invoice2data/input/pdftotext.py
to_text
def to_text(path): """Wrapper around Poppler pdftotext. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- out : str returns extracted text from pdf Raises ------ EnvironmentError: If pdftotext library is not found """ import subprocess from distutils import spawn # py2 compat if spawn.find_executable("pdftotext"): # shutil.which('pdftotext'): out, err = subprocess.Popen( ["pdftotext", '-layout', '-enc', 'UTF-8', path, '-'], stdout=subprocess.PIPE ).communicate() return out else: raise EnvironmentError( 'pdftotext not installed. Can be downloaded from https://poppler.freedesktop.org/' )
python
def to_text(path):
    """Wrapper around Poppler pdftotext.

    Parameters
    ----------
    path : str
        path of electronic invoice in PDF

    Returns
    -------
    out : bytes
        returns extracted text from pdf

    Raises
    ------
    EnvironmentError:
        If pdftotext library is not found
    """
    import subprocess
    from distutils import spawn  # py2 compat

    # Guard clause instead of if/else: bail out early when the binary
    # is missing.  # shutil.which('pdftotext'):
    if not spawn.find_executable("pdftotext"):
        raise EnvironmentError(
            'pdftotext not installed. Can be downloaded from https://poppler.freedesktop.org/'
        )

    cmd = ["pdftotext", '-layout', '-enc', 'UTF-8', path, '-']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out, err = proc.communicate()
    return out
[ "def", "to_text", "(", "path", ")", ":", "import", "subprocess", "from", "distutils", "import", "spawn", "# py2 compat", "if", "spawn", ".", "find_executable", "(", "\"pdftotext\"", ")", ":", "# shutil.which('pdftotext'):", "out", ",", "err", "=", "subprocess", ...
Wrapper around Poppler pdftotext. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- out : str returns extracted text from pdf Raises ------ EnvironmentError: If pdftotext library is not found
[ "Wrapper", "around", "Poppler", "pdftotext", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/pdftotext.py#L2-L31
train
222,010
invoice-x/invoice2data
src/invoice2data/extract/invoice_template.py
InvoiceTemplate.prepare_input
def prepare_input(self, extracted_str): """ Input raw string and do transformations, as set in template file. """ # Remove withspace if self.options['remove_whitespace']: optimized_str = re.sub(' +', '', extracted_str) else: optimized_str = extracted_str # Remove accents if self.options['remove_accents']: optimized_str = unidecode(optimized_str) # convert to lower case if self.options['lowercase']: optimized_str = optimized_str.lower() # specific replace for replace in self.options['replace']: assert len(replace) == 2, 'A replace should be a list of 2 items' optimized_str = optimized_str.replace(replace[0], replace[1]) return optimized_str
python
def prepare_input(self, extracted_str):
    """
    Input raw string and do transformations, as set in template file.
    """
    optimized_str = extracted_str

    # Drop runs of spaces entirely when requested.
    if self.options['remove_whitespace']:
        optimized_str = re.sub(' +', '', optimized_str)

    # Transliterate accented characters to plain ASCII.
    if self.options['remove_accents']:
        optimized_str = unidecode(optimized_str)

    # Case-fold to lower case.
    if self.options['lowercase']:
        optimized_str = optimized_str.lower()

    # Apply template-specific pairwise replacements.
    for replace in self.options['replace']:
        assert len(replace) == 2, 'A replace should be a list of 2 items'
        optimized_str = optimized_str.replace(replace[0], replace[1])

    return optimized_str
[ "def", "prepare_input", "(", "self", ",", "extracted_str", ")", ":", "# Remove withspace", "if", "self", ".", "options", "[", "'remove_whitespace'", "]", ":", "optimized_str", "=", "re", ".", "sub", "(", "' +'", ",", "''", ",", "extracted_str", ")", "else", ...
Input raw string and do transformations, as set in template file.
[ "Input", "raw", "string", "and", "do", "transformations", "as", "set", "in", "template", "file", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/extract/invoice_template.py#L64-L88
train
222,011
invoice-x/invoice2data
src/invoice2data/extract/invoice_template.py
InvoiceTemplate.matches_input
def matches_input(self, optimized_str): """See if string matches keywords set in template file""" if all([keyword in optimized_str for keyword in self['keywords']]): logger.debug('Matched template %s', self['template_name']) return True
python
def matches_input(self, optimized_str): """See if string matches keywords set in template file""" if all([keyword in optimized_str for keyword in self['keywords']]): logger.debug('Matched template %s', self['template_name']) return True
[ "def", "matches_input", "(", "self", ",", "optimized_str", ")", ":", "if", "all", "(", "[", "keyword", "in", "optimized_str", "for", "keyword", "in", "self", "[", "'keywords'", "]", "]", ")", ":", "logger", ".", "debug", "(", "'Matched template %s'", ",", ...
See if string matches keywords set in template file
[ "See", "if", "string", "matches", "keywords", "set", "in", "template", "file" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/extract/invoice_template.py#L90-L95
train
222,012
invoice-x/invoice2data
src/invoice2data/extract/invoice_template.py
InvoiceTemplate.parse_date
def parse_date(self, value): """Parses date and returns date after parsing""" res = dateparser.parse( value, date_formats=self.options['date_formats'], languages=self.options['languages'] ) logger.debug("result of date parsing=%s", res) return res
python
def parse_date(self, value): """Parses date and returns date after parsing""" res = dateparser.parse( value, date_formats=self.options['date_formats'], languages=self.options['languages'] ) logger.debug("result of date parsing=%s", res) return res
[ "def", "parse_date", "(", "self", ",", "value", ")", ":", "res", "=", "dateparser", ".", "parse", "(", "value", ",", "date_formats", "=", "self", ".", "options", "[", "'date_formats'", "]", ",", "languages", "=", "self", ".", "options", "[", "'languages'...
Parses date and returns date after parsing
[ "Parses", "date", "and", "returns", "date", "after", "parsing" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/extract/invoice_template.py#L108-L114
train
222,013
invoice-x/invoice2data
src/invoice2data/output/to_json.py
write_to_file
def write_to_file(data, path): """Export extracted fields to json Appends .json to path if missing and generates json file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated json file Notes ---- Do give file name to the function parameter path. Examples -------- >>> from invoice2data.output import to_json >>> to_json.write_to_file(data, "/exported_json/invoice.json") >>> to_json.write_to_file(data, "invoice.json") """ if path.endswith('.json'): filename = path else: filename = path + '.json' with codecs.open(filename, "w", encoding='utf-8') as json_file: for line in data: line['date'] = line['date'].strftime('%d/%m/%Y') print(type(json)) print(json) json.dump( data, json_file, indent=4, sort_keys=True, default=myconverter, ensure_ascii=False )
python
def write_to_file(data, path): """Export extracted fields to json Appends .json to path if missing and generates json file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated json file Notes ---- Do give file name to the function parameter path. Examples -------- >>> from invoice2data.output import to_json >>> to_json.write_to_file(data, "/exported_json/invoice.json") >>> to_json.write_to_file(data, "invoice.json") """ if path.endswith('.json'): filename = path else: filename = path + '.json' with codecs.open(filename, "w", encoding='utf-8') as json_file: for line in data: line['date'] = line['date'].strftime('%d/%m/%Y') print(type(json)) print(json) json.dump( data, json_file, indent=4, sort_keys=True, default=myconverter, ensure_ascii=False )
[ "def", "write_to_file", "(", "data", ",", "path", ")", ":", "if", "path", ".", "endswith", "(", "'.json'", ")", ":", "filename", "=", "path", "else", ":", "filename", "=", "path", "+", "'.json'", "with", "codecs", ".", "open", "(", "filename", ",", "...
Export extracted fields to json Appends .json to path if missing and generates json file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated json file Notes ---- Do give file name to the function parameter path. Examples -------- >>> from invoice2data.output import to_json >>> to_json.write_to_file(data, "/exported_json/invoice.json") >>> to_json.write_to_file(data, "invoice.json")
[ "Export", "extracted", "fields", "to", "json" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/output/to_json.py#L12-L47
train
222,014
invoice-x/invoice2data
src/invoice2data/main.py
create_parser
def create_parser(): """Returns argument parser """ parser = argparse.ArgumentParser( description='Extract structured data from PDF files and save to CSV or JSON.' ) parser.add_argument( '--input-reader', choices=input_mapping.keys(), default='pdftotext', help='Choose text extraction function. Default: pdftotext', ) parser.add_argument( '--output-format', choices=output_mapping.keys(), default='none', help='Choose output format. Default: none', ) parser.add_argument( '--output-name', '-o', dest='output_name', default='invoices-output', help='Custom name for output file. Extension is added based on chosen format.', ) parser.add_argument( '--debug', dest='debug', action='store_true', help='Enable debug information.' ) parser.add_argument( '--copy', '-c', dest='copy', help='Copy and rename processed PDFs to specified folder.' ) parser.add_argument( '--move', '-m', dest='move', help='Move and rename processed PDFs to specified folder.' ) parser.add_argument( '--filename-format', dest='filename', default="{date} {invoice_number} {desc}.pdf", help='Filename format to use when moving or copying processed PDFs.' 'Default: "{date} {invoice_number} {desc}.pdf"', ) parser.add_argument( '--template-folder', '-t', dest='template_folder', help='Folder containing invoice templates in yml file. Always adds built-in templates.', ) parser.add_argument( '--exclude-built-in-templates', dest='exclude_built_in_templates', default=False, help='Ignore built-in templates.', action="store_true", ) parser.add_argument( 'input_files', type=argparse.FileType('r'), nargs='+', help='File or directory to analyze.' ) return parser
python
def create_parser(): """Returns argument parser """ parser = argparse.ArgumentParser( description='Extract structured data from PDF files and save to CSV or JSON.' ) parser.add_argument( '--input-reader', choices=input_mapping.keys(), default='pdftotext', help='Choose text extraction function. Default: pdftotext', ) parser.add_argument( '--output-format', choices=output_mapping.keys(), default='none', help='Choose output format. Default: none', ) parser.add_argument( '--output-name', '-o', dest='output_name', default='invoices-output', help='Custom name for output file. Extension is added based on chosen format.', ) parser.add_argument( '--debug', dest='debug', action='store_true', help='Enable debug information.' ) parser.add_argument( '--copy', '-c', dest='copy', help='Copy and rename processed PDFs to specified folder.' ) parser.add_argument( '--move', '-m', dest='move', help='Move and rename processed PDFs to specified folder.' ) parser.add_argument( '--filename-format', dest='filename', default="{date} {invoice_number} {desc}.pdf", help='Filename format to use when moving or copying processed PDFs.' 'Default: "{date} {invoice_number} {desc}.pdf"', ) parser.add_argument( '--template-folder', '-t', dest='template_folder', help='Folder containing invoice templates in yml file. Always adds built-in templates.', ) parser.add_argument( '--exclude-built-in-templates', dest='exclude_built_in_templates', default=False, help='Ignore built-in templates.', action="store_true", ) parser.add_argument( 'input_files', type=argparse.FileType('r'), nargs='+', help='File or directory to analyze.' ) return parser
[ "def", "create_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Extract structured data from PDF files and save to CSV or JSON.'", ")", "parser", ".", "add_argument", "(", "'--input-reader'", ",", "choices", "=", "inpu...
Returns argument parser
[ "Returns", "argument", "parser" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/main.py#L99-L167
train
222,015
invoice-x/invoice2data
src/invoice2data/main.py
main
def main(args=None): """Take folder or single file and analyze each.""" if args is None: parser = create_parser() args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) input_module = input_mapping[args.input_reader] output_module = output_mapping[args.output_format] templates = [] # Load templates from external folder if set. if args.template_folder: templates += read_templates(os.path.abspath(args.template_folder)) # Load internal templates, if not disabled. if not args.exclude_built_in_templates: templates += read_templates() output = [] for f in args.input_files: res = extract_data(f.name, templates=templates, input_module=input_module) if res: logger.info(res) output.append(res) if args.copy: filename = args.filename.format( date=res['date'].strftime('%Y-%m-%d'), invoice_number=res['invoice_number'], desc=res['desc'], ) shutil.copyfile(f.name, join(args.copy, filename)) if args.move: filename = args.filename.format( date=res['date'].strftime('%Y-%m-%d'), invoice_number=res['invoice_number'], desc=res['desc'], ) shutil.move(f.name, join(args.move, filename)) f.close() if output_module is not None: output_module.write_to_file(output, args.output_name)
python
def main(args=None): """Take folder or single file and analyze each.""" if args is None: parser = create_parser() args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) input_module = input_mapping[args.input_reader] output_module = output_mapping[args.output_format] templates = [] # Load templates from external folder if set. if args.template_folder: templates += read_templates(os.path.abspath(args.template_folder)) # Load internal templates, if not disabled. if not args.exclude_built_in_templates: templates += read_templates() output = [] for f in args.input_files: res = extract_data(f.name, templates=templates, input_module=input_module) if res: logger.info(res) output.append(res) if args.copy: filename = args.filename.format( date=res['date'].strftime('%Y-%m-%d'), invoice_number=res['invoice_number'], desc=res['desc'], ) shutil.copyfile(f.name, join(args.copy, filename)) if args.move: filename = args.filename.format( date=res['date'].strftime('%Y-%m-%d'), invoice_number=res['invoice_number'], desc=res['desc'], ) shutil.move(f.name, join(args.move, filename)) f.close() if output_module is not None: output_module.write_to_file(output, args.output_name)
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "parser", "=", "create_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "debug", ":", "logging", ".", "basicConfig", "(", "...
Take folder or single file and analyze each.
[ "Take", "folder", "or", "single", "file", "and", "analyze", "each", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/main.py#L170-L215
train
222,016
invoice-x/invoice2data
src/invoice2data/output/to_xml.py
write_to_file
def write_to_file(data, path): """Export extracted fields to xml Appends .xml to path if missing and generates xml file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated xml file Notes ---- Do give file name to the function parameter path. Only `date`, `desc`, `amount` and `currency` are exported Examples -------- >>> from invoice2data.output import to_xml >>> to_xml.write_to_file(data, "/exported_xml/invoice.xml") >>> to_xml.write_to_file(data, "invoice.xml") """ if path.endswith('.xml'): filename = path else: filename = path + '.xml' tag_data = ET.Element('data') xml_file = open(filename, "w") i = 0 for line in data: i += 1 tag_item = ET.SubElement(tag_data, 'item') tag_date = ET.SubElement(tag_item, 'date') tag_desc = ET.SubElement(tag_item, 'desc') tag_currency = ET.SubElement(tag_item, 'currency') tag_amount = ET.SubElement(tag_item, 'amount') tag_item.set('id', str(i)) tag_date.text = line['date'].strftime('%d/%m/%Y') tag_desc.text = line['desc'] tag_currency.text = line['currency'] tag_amount.text = str(line['amount']) xml_file.write(prettify(tag_data)) xml_file.close()
python
def write_to_file(data, path): """Export extracted fields to xml Appends .xml to path if missing and generates xml file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated xml file Notes ---- Do give file name to the function parameter path. Only `date`, `desc`, `amount` and `currency` are exported Examples -------- >>> from invoice2data.output import to_xml >>> to_xml.write_to_file(data, "/exported_xml/invoice.xml") >>> to_xml.write_to_file(data, "invoice.xml") """ if path.endswith('.xml'): filename = path else: filename = path + '.xml' tag_data = ET.Element('data') xml_file = open(filename, "w") i = 0 for line in data: i += 1 tag_item = ET.SubElement(tag_data, 'item') tag_date = ET.SubElement(tag_item, 'date') tag_desc = ET.SubElement(tag_item, 'desc') tag_currency = ET.SubElement(tag_item, 'currency') tag_amount = ET.SubElement(tag_item, 'amount') tag_item.set('id', str(i)) tag_date.text = line['date'].strftime('%d/%m/%Y') tag_desc.text = line['desc'] tag_currency.text = line['currency'] tag_amount.text = str(line['amount']) xml_file.write(prettify(tag_data)) xml_file.close()
[ "def", "write_to_file", "(", "data", ",", "path", ")", ":", "if", "path", ".", "endswith", "(", "'.xml'", ")", ":", "filename", "=", "path", "else", ":", "filename", "=", "path", "+", "'.xml'", "tag_data", "=", "ET", ".", "Element", "(", "'data'", ")...
Export extracted fields to xml Appends .xml to path if missing and generates xml file in specified directory, if not then in root Parameters ---------- data : dict Dictionary of extracted fields path : str directory to save generated xml file Notes ---- Do give file name to the function parameter path. Only `date`, `desc`, `amount` and `currency` are exported Examples -------- >>> from invoice2data.output import to_xml >>> to_xml.write_to_file(data, "/exported_xml/invoice.xml") >>> to_xml.write_to_file(data, "invoice.xml")
[ "Export", "extracted", "fields", "to", "xml" ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/output/to_xml.py#L12-L59
train
222,017
invoice-x/invoice2data
src/invoice2data/extract/loader.py
read_templates
def read_templates(folder=None): """ Load yaml templates from template folder. Return list of dicts. Use built-in templates if no folder is set. Parameters ---------- folder : str user defined folder where they stores their files, if None uses built-in templates Returns ------- output : Instance of `InvoiceTemplate` template which match based on keywords Examples -------- >>> read_template("home/duskybomb/invoice-templates/") InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'), ('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])), ('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'), ('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')]) After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from `extract_data()` >>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'), ('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])), ('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'), ('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')]) >>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext) {'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087', 'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'} """ output = [] if folder is None: folder = pkg_resources.resource_filename(__name__, 'templates') for path, subdirs, files in os.walk(folder): for name in sorted(files): if name.endswith('.yml'): with open(os.path.join(path, name), 'rb') as f: encoding = chardet.detect(f.read())['encoding'] with codecs.open(os.path.join(path, name), encoding=encoding) as template_file: tpl = 
ordered_load(template_file.read()) tpl['template_name'] = name # Test if all required fields are in template: assert 'keywords' in tpl.keys(), 'Missing keywords field.' # Keywords as list, if only one. if type(tpl['keywords']) is not list: tpl['keywords'] = [tpl['keywords']] output.append(InvoiceTemplate(tpl)) return output
python
def read_templates(folder=None): """ Load yaml templates from template folder. Return list of dicts. Use built-in templates if no folder is set. Parameters ---------- folder : str user defined folder where they stores their files, if None uses built-in templates Returns ------- output : Instance of `InvoiceTemplate` template which match based on keywords Examples -------- >>> read_template("home/duskybomb/invoice-templates/") InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'), ('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])), ('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'), ('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')]) After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from `extract_data()` >>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'), ('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])), ('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'), ('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')]) >>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext) {'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087', 'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'} """ output = [] if folder is None: folder = pkg_resources.resource_filename(__name__, 'templates') for path, subdirs, files in os.walk(folder): for name in sorted(files): if name.endswith('.yml'): with open(os.path.join(path, name), 'rb') as f: encoding = chardet.detect(f.read())['encoding'] with codecs.open(os.path.join(path, name), encoding=encoding) as template_file: tpl = 
ordered_load(template_file.read()) tpl['template_name'] = name # Test if all required fields are in template: assert 'keywords' in tpl.keys(), 'Missing keywords field.' # Keywords as list, if only one. if type(tpl['keywords']) is not list: tpl['keywords'] = [tpl['keywords']] output.append(InvoiceTemplate(tpl)) return output
[ "def", "read_templates", "(", "folder", "=", "None", ")", ":", "output", "=", "[", "]", "if", "folder", "is", "None", ":", "folder", "=", "pkg_resources", ".", "resource_filename", "(", "__name__", ",", "'templates'", ")", "for", "path", ",", "subdirs", ...
Load yaml templates from template folder. Return list of dicts. Use built-in templates if no folder is set. Parameters ---------- folder : str user defined folder where they stores their files, if None uses built-in templates Returns ------- output : Instance of `InvoiceTemplate` template which match based on keywords Examples -------- >>> read_template("home/duskybomb/invoice-templates/") InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'), ('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])), ('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'), ('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')]) After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from `extract_data()` >>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'), ('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])), ('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'), ('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')]) >>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext) {'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087', 'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
[ "Load", "yaml", "templates", "from", "template", "folder", ".", "Return", "list", "of", "dicts", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/extract/loader.py#L39-L99
train
222,018
invoice-x/invoice2data
src/invoice2data/input/pdfminer_wrapper.py
to_text
def to_text(path): """Wrapper around `pdfminer`. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- str : str returns extracted text from pdf """ try: # python 2 from StringIO import StringIO import sys reload(sys) # noqa: F821 sys.setdefaultencoding('utf8') except ImportError: from io import StringIO from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import TextConverter from pdfminer.layout import LAParams from pdfminer.pdfpage import PDFPage rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() laparams.all_texts = True device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) with open(path, 'rb') as fp: interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos = set() pages = PDFPage.get_pages( fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True, ) for page in pages: interpreter.process_page(page) device.close() str = retstr.getvalue() retstr.close() return str.encode('utf-8')
python
def to_text(path): """Wrapper around `pdfminer`. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- str : str returns extracted text from pdf """ try: # python 2 from StringIO import StringIO import sys reload(sys) # noqa: F821 sys.setdefaultencoding('utf8') except ImportError: from io import StringIO from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import TextConverter from pdfminer.layout import LAParams from pdfminer.pdfpage import PDFPage rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() laparams.all_texts = True device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) with open(path, 'rb') as fp: interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos = set() pages = PDFPage.get_pages( fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True, ) for page in pages: interpreter.process_page(page) device.close() str = retstr.getvalue() retstr.close() return str.encode('utf-8')
[ "def", "to_text", "(", "path", ")", ":", "try", ":", "# python 2", "from", "StringIO", "import", "StringIO", "import", "sys", "reload", "(", "sys", ")", "# noqa: F821", "sys", ".", "setdefaultencoding", "(", "'utf8'", ")", "except", "ImportError", ":", "from...
Wrapper around `pdfminer`. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- str : str returns extracted text from pdf
[ "Wrapper", "around", "pdfminer", "." ]
d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/pdfminer_wrapper.py#L2-L57
train
222,019
SALib/SALib
src/SALib/analyze/ff.py
analyze
def analyze(problem, X, Y, second_order=False, print_to_console=False, seed=None): """Perform a fractional factorial analysis Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect). The techniques bulks out the number of parameters with dummy parameters to the nearest 2**n. Any results involving dummy parameters could indicate a problem with the model runs. Arguments --------- problem: dict The problem definition X: numpy.matrix The NumPy matrix containing the model inputs Y: numpy.array The NumPy array containing the model outputs second_order: bool, default=False Include interaction effects print_to_console: bool, default=False Print results directly to console Returns ------- Si: dict A dictionary of sensitivity indices, including main effects ``ME``, and interaction effects ``IE`` (if ``second_order`` is True) Examples -------- >>> X = sample(problem) >>> Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0])) >>> analyze(problem, X, Y, second_order=True, print_to_console=True) """ if seed: np.random.seed(seed) problem = extend_bounds(problem) num_vars = problem['num_vars'] X = generate_contrast(problem) main_effect = (1. / (2 * num_vars)) * np.dot(Y, X) Si = ResultDict((k, [None] * num_vars) for k in ['names', 'ME']) Si['ME'] = main_effect Si['names'] = problem['names'] if print_to_console: print("Parameter ME") for j in range(num_vars): print("%s %f" % (problem['names'][j], Si['ME'][j])) if second_order: interaction_names, interaction_effects = interactions(problem, Y, print_to_console) Si['interaction_names'] = interaction_names Si['IE'] = interaction_effects Si.to_df = MethodType(to_df, Si) return Si
python
def analyze(problem, X, Y, second_order=False, print_to_console=False, seed=None): """Perform a fractional factorial analysis Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect). The techniques bulks out the number of parameters with dummy parameters to the nearest 2**n. Any results involving dummy parameters could indicate a problem with the model runs. Arguments --------- problem: dict The problem definition X: numpy.matrix The NumPy matrix containing the model inputs Y: numpy.array The NumPy array containing the model outputs second_order: bool, default=False Include interaction effects print_to_console: bool, default=False Print results directly to console Returns ------- Si: dict A dictionary of sensitivity indices, including main effects ``ME``, and interaction effects ``IE`` (if ``second_order`` is True) Examples -------- >>> X = sample(problem) >>> Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0])) >>> analyze(problem, X, Y, second_order=True, print_to_console=True) """ if seed: np.random.seed(seed) problem = extend_bounds(problem) num_vars = problem['num_vars'] X = generate_contrast(problem) main_effect = (1. / (2 * num_vars)) * np.dot(Y, X) Si = ResultDict((k, [None] * num_vars) for k in ['names', 'ME']) Si['ME'] = main_effect Si['names'] = problem['names'] if print_to_console: print("Parameter ME") for j in range(num_vars): print("%s %f" % (problem['names'][j], Si['ME'][j])) if second_order: interaction_names, interaction_effects = interactions(problem, Y, print_to_console) Si['interaction_names'] = interaction_names Si['IE'] = interaction_effects Si.to_df = MethodType(to_df, Si) return Si
[ "def", "analyze", "(", "problem", ",", "X", ",", "Y", ",", "second_order", "=", "False", ",", "print_to_console", "=", "False", ",", "seed", "=", "None", ")", ":", "if", "seed", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "problem", "=...
Perform a fractional factorial analysis Returns a dictionary with keys 'ME' (main effect) and 'IE' (interaction effect). The techniques bulks out the number of parameters with dummy parameters to the nearest 2**n. Any results involving dummy parameters could indicate a problem with the model runs. Arguments --------- problem: dict The problem definition X: numpy.matrix The NumPy matrix containing the model inputs Y: numpy.array The NumPy array containing the model outputs second_order: bool, default=False Include interaction effects print_to_console: bool, default=False Print results directly to console Returns ------- Si: dict A dictionary of sensitivity indices, including main effects ``ME``, and interaction effects ``IE`` (if ``second_order`` is True) Examples -------- >>> X = sample(problem) >>> Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (0.2 + X[:, 0])) >>> analyze(problem, X, Y, second_order=True, print_to_console=True)
[ "Perform", "a", "fractional", "factorial", "analysis" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/ff.py#L18-L81
train
222,020
SALib/SALib
src/SALib/analyze/ff.py
to_df
def to_df(self): '''Conversion method to Pandas DataFrame. To be attached to ResultDict. Returns ------- main_effect, inter_effect: tuple A tuple of DataFrames for main effects and interaction effects. The second element (for interactions) will be `None` if not available. ''' names = self['names'] main_effect = self['ME'] interactions = self.get('IE', None) inter_effect = None if interactions: interaction_names = self.get('interaction_names') names = [name for name in names if not isinstance(name, list)] inter_effect = pd.DataFrame({'IE': interactions}, index=interaction_names) main_effect = pd.DataFrame({'ME': main_effect}, index=names) return main_effect, inter_effect
python
def to_df(self): '''Conversion method to Pandas DataFrame. To be attached to ResultDict. Returns ------- main_effect, inter_effect: tuple A tuple of DataFrames for main effects and interaction effects. The second element (for interactions) will be `None` if not available. ''' names = self['names'] main_effect = self['ME'] interactions = self.get('IE', None) inter_effect = None if interactions: interaction_names = self.get('interaction_names') names = [name for name in names if not isinstance(name, list)] inter_effect = pd.DataFrame({'IE': interactions}, index=interaction_names) main_effect = pd.DataFrame({'ME': main_effect}, index=names) return main_effect, inter_effect
[ "def", "to_df", "(", "self", ")", ":", "names", "=", "self", "[", "'names'", "]", "main_effect", "=", "self", "[", "'ME'", "]", "interactions", "=", "self", ".", "get", "(", "'IE'", ",", "None", ")", "inter_effect", "=", "None", "if", "interactions", ...
Conversion method to Pandas DataFrame. To be attached to ResultDict. Returns ------- main_effect, inter_effect: tuple A tuple of DataFrames for main effects and interaction effects. The second element (for interactions) will be `None` if not available.
[ "Conversion", "method", "to", "Pandas", "DataFrame", ".", "To", "be", "attached", "to", "ResultDict", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/ff.py#L84-L106
train
222,021
SALib/SALib
src/SALib/analyze/ff.py
interactions
def interactions(problem, Y, print_to_console=False): """Computes the second order effects Computes the second order effects (interactions) between all combinations of pairs of input factors Arguments --------- problem: dict The problem definition Y: numpy.array The NumPy array containing the model outputs print_to_console: bool, default=False Print results directly to console Returns ------- ie_names: list The names of the interaction pairs IE: list The sensitivity indices for the pairwise interactions """ names = problem['names'] num_vars = problem['num_vars'] X = generate_contrast(problem) ie_names = [] IE = [] for col in range(X.shape[1]): for col_2 in range(col): x = X[:, col] * X[:, col_2] var_names = (names[col_2], names[col]) ie_names.append(var_names) IE.append((1. / (2 * num_vars)) * np.dot(Y, x)) if print_to_console: [print('%s %f' % (n, i)) for (n, i) in zip(ie_names, IE)] return ie_names, IE
python
def interactions(problem, Y, print_to_console=False): """Computes the second order effects Computes the second order effects (interactions) between all combinations of pairs of input factors Arguments --------- problem: dict The problem definition Y: numpy.array The NumPy array containing the model outputs print_to_console: bool, default=False Print results directly to console Returns ------- ie_names: list The names of the interaction pairs IE: list The sensitivity indices for the pairwise interactions """ names = problem['names'] num_vars = problem['num_vars'] X = generate_contrast(problem) ie_names = [] IE = [] for col in range(X.shape[1]): for col_2 in range(col): x = X[:, col] * X[:, col_2] var_names = (names[col_2], names[col]) ie_names.append(var_names) IE.append((1. / (2 * num_vars)) * np.dot(Y, x)) if print_to_console: [print('%s %f' % (n, i)) for (n, i) in zip(ie_names, IE)] return ie_names, IE
[ "def", "interactions", "(", "problem", ",", "Y", ",", "print_to_console", "=", "False", ")", ":", "names", "=", "problem", "[", "'names'", "]", "num_vars", "=", "problem", "[", "'num_vars'", "]", "X", "=", "generate_contrast", "(", "problem", ")", "ie_name...
Computes the second order effects Computes the second order effects (interactions) between all combinations of pairs of input factors Arguments --------- problem: dict The problem definition Y: numpy.array The NumPy array containing the model outputs print_to_console: bool, default=False Print results directly to console Returns ------- ie_names: list The names of the interaction pairs IE: list The sensitivity indices for the pairwise interactions
[ "Computes", "the", "second", "order", "effects" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/ff.py#L109-L150
train
222,022
SALib/SALib
src/SALib/util/__init__.py
avail_approaches
def avail_approaches(pkg): '''Create list of available modules. Arguments --------- pkg : module module to inspect Returns --------- method : list A list of available submodules ''' methods = [modname for importer, modname, ispkg in pkgutil.walk_packages(path=pkg.__path__) if modname not in ['common_args', 'directions', 'sobol_sequence']] return methods
python
def avail_approaches(pkg): '''Create list of available modules. Arguments --------- pkg : module module to inspect Returns --------- method : list A list of available submodules ''' methods = [modname for importer, modname, ispkg in pkgutil.walk_packages(path=pkg.__path__) if modname not in ['common_args', 'directions', 'sobol_sequence']] return methods
[ "def", "avail_approaches", "(", "pkg", ")", ":", "methods", "=", "[", "modname", "for", "importer", ",", "modname", ",", "ispkg", "in", "pkgutil", ".", "walk_packages", "(", "path", "=", "pkg", ".", "__path__", ")", "if", "modname", "not", "in", "[", "...
Create list of available modules. Arguments --------- pkg : module module to inspect Returns --------- method : list A list of available submodules
[ "Create", "list", "of", "available", "modules", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L18-L35
train
222,023
SALib/SALib
src/SALib/util/__init__.py
scale_samples
def scale_samples(params, bounds): '''Rescale samples in 0-to-1 range to arbitrary bounds Arguments --------- bounds : list list of lists of dimensions `num_params`-by-2 params : numpy.ndarray numpy array of dimensions `num_params`-by-:math:`N`, where :math:`N` is the number of samples ''' # Check bounds are legal (upper bound is greater than lower bound) b = np.array(bounds) lower_bounds = b[:, 0] upper_bounds = b[:, 1] if np.any(lower_bounds >= upper_bounds): raise ValueError("Bounds are not legal") # This scales the samples in-place, by using the optional output # argument for the numpy ufunctions # The calculation is equivalent to: # sample * (upper_bound - lower_bound) + lower_bound np.add(np.multiply(params, (upper_bounds - lower_bounds), out=params), lower_bounds, out=params)
python
def scale_samples(params, bounds): '''Rescale samples in 0-to-1 range to arbitrary bounds Arguments --------- bounds : list list of lists of dimensions `num_params`-by-2 params : numpy.ndarray numpy array of dimensions `num_params`-by-:math:`N`, where :math:`N` is the number of samples ''' # Check bounds are legal (upper bound is greater than lower bound) b = np.array(bounds) lower_bounds = b[:, 0] upper_bounds = b[:, 1] if np.any(lower_bounds >= upper_bounds): raise ValueError("Bounds are not legal") # This scales the samples in-place, by using the optional output # argument for the numpy ufunctions # The calculation is equivalent to: # sample * (upper_bound - lower_bound) + lower_bound np.add(np.multiply(params, (upper_bounds - lower_bounds), out=params), lower_bounds, out=params)
[ "def", "scale_samples", "(", "params", ",", "bounds", ")", ":", "# Check bounds are legal (upper bound is greater than lower bound)", "b", "=", "np", ".", "array", "(", "bounds", ")", "lower_bounds", "=", "b", "[", ":", ",", "0", "]", "upper_bounds", "=", "b", ...
Rescale samples in 0-to-1 range to arbitrary bounds Arguments --------- bounds : list list of lists of dimensions `num_params`-by-2 params : numpy.ndarray numpy array of dimensions `num_params`-by-:math:`N`, where :math:`N` is the number of samples
[ "Rescale", "samples", "in", "0", "-", "to", "-", "1", "range", "to", "arbitrary", "bounds" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L38-L65
train
222,024
SALib/SALib
src/SALib/util/__init__.py
nonuniform_scale_samples
def nonuniform_scale_samples(params, bounds, dists): """Rescale samples in 0-to-1 range to other distributions Arguments --------- problem : dict problem definition including bounds params : numpy.ndarray numpy array of dimensions num_params-by-N, where N is the number of samples dists : list list of distributions, one for each parameter unif: uniform with lower and upper bounds triang: triangular with width (scale) and location of peak location of peak is in percentage of width lower bound assumed to be zero norm: normal distribution with mean and standard deviation lognorm: lognormal with ln-space mean and standard deviation """ b = np.array(bounds) # initializing matrix for converted values conv_params = np.zeros_like(params) # loop over the parameters for i in range(conv_params.shape[1]): # setting first and second arguments for distributions b1 = b[i][0] b2 = b[i][1] if dists[i] == 'triang': # checking for correct parameters if b1 <= 0 or b2 <= 0 or b2 >= 1: raise ValueError('''Triangular distribution: Scale must be greater than zero; peak on interval [0,1]''') else: conv_params[:, i] = sp.stats.triang.ppf( params[:, i], c=b2, scale=b1, loc=0) elif dists[i] == 'unif': if b1 >= b2: raise ValueError('''Uniform distribution: lower bound must be less than upper bound''') else: conv_params[:, i] = params[:, i] * (b2 - b1) + b1 elif dists[i] == 'norm': if b2 <= 0: raise ValueError('''Normal distribution: stdev must be > 0''') else: conv_params[:, i] = sp.stats.norm.ppf( params[:, i], loc=b1, scale=b2) # lognormal distribution (ln-space, not base-10) # paramters are ln-space mean and standard deviation elif dists[i] == 'lognorm': # checking for valid parameters if b2 <= 0: raise ValueError( '''Lognormal distribution: stdev must be > 0''') else: conv_params[:, i] = np.exp( sp.stats.norm.ppf(params[:, i], loc=b1, scale=b2)) else: valid_dists = ['unif', 'triang', 'norm', 'lognorm'] raise ValueError('Distributions: choose one of %s' % ", ".join(valid_dists)) return 
conv_params
python
def nonuniform_scale_samples(params, bounds, dists): """Rescale samples in 0-to-1 range to other distributions Arguments --------- problem : dict problem definition including bounds params : numpy.ndarray numpy array of dimensions num_params-by-N, where N is the number of samples dists : list list of distributions, one for each parameter unif: uniform with lower and upper bounds triang: triangular with width (scale) and location of peak location of peak is in percentage of width lower bound assumed to be zero norm: normal distribution with mean and standard deviation lognorm: lognormal with ln-space mean and standard deviation """ b = np.array(bounds) # initializing matrix for converted values conv_params = np.zeros_like(params) # loop over the parameters for i in range(conv_params.shape[1]): # setting first and second arguments for distributions b1 = b[i][0] b2 = b[i][1] if dists[i] == 'triang': # checking for correct parameters if b1 <= 0 or b2 <= 0 or b2 >= 1: raise ValueError('''Triangular distribution: Scale must be greater than zero; peak on interval [0,1]''') else: conv_params[:, i] = sp.stats.triang.ppf( params[:, i], c=b2, scale=b1, loc=0) elif dists[i] == 'unif': if b1 >= b2: raise ValueError('''Uniform distribution: lower bound must be less than upper bound''') else: conv_params[:, i] = params[:, i] * (b2 - b1) + b1 elif dists[i] == 'norm': if b2 <= 0: raise ValueError('''Normal distribution: stdev must be > 0''') else: conv_params[:, i] = sp.stats.norm.ppf( params[:, i], loc=b1, scale=b2) # lognormal distribution (ln-space, not base-10) # paramters are ln-space mean and standard deviation elif dists[i] == 'lognorm': # checking for valid parameters if b2 <= 0: raise ValueError( '''Lognormal distribution: stdev must be > 0''') else: conv_params[:, i] = np.exp( sp.stats.norm.ppf(params[:, i], loc=b1, scale=b2)) else: valid_dists = ['unif', 'triang', 'norm', 'lognorm'] raise ValueError('Distributions: choose one of %s' % ", ".join(valid_dists)) return 
conv_params
[ "def", "nonuniform_scale_samples", "(", "params", ",", "bounds", ",", "dists", ")", ":", "b", "=", "np", ".", "array", "(", "bounds", ")", "# initializing matrix for converted values", "conv_params", "=", "np", ".", "zeros_like", "(", "params", ")", "# loop over...
Rescale samples in 0-to-1 range to other distributions Arguments --------- problem : dict problem definition including bounds params : numpy.ndarray numpy array of dimensions num_params-by-N, where N is the number of samples dists : list list of distributions, one for each parameter unif: uniform with lower and upper bounds triang: triangular with width (scale) and location of peak location of peak is in percentage of width lower bound assumed to be zero norm: normal distribution with mean and standard deviation lognorm: lognormal with ln-space mean and standard deviation
[ "Rescale", "samples", "in", "0", "-", "to", "-", "1", "range", "to", "other", "distributions" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L96-L165
train
222,025
SALib/SALib
src/SALib/util/__init__.py
read_param_file
def read_param_file(filename, delimiter=None): """Unpacks a parameter file into a dictionary Reads a parameter file of format:: Param1,0,1,Group1,dist1 Param2,0,1,Group2,dist2 Param3,0,1,Group3,dist3 (Group and Dist columns are optional) Returns a dictionary containing: - names - the names of the parameters - bounds - a list of lists of lower and upper bounds - num_vars - a scalar indicating the number of variables (the length of names) - groups - a list of group names (strings) for each variable - dists - a list of distributions for the problem, None if not specified or all uniform Arguments --------- filename : str The path to the parameter file delimiter : str, default=None The delimiter used in the file to distinguish between columns """ names = [] bounds = [] groups = [] dists = [] num_vars = 0 fieldnames = ['name', 'lower_bound', 'upper_bound', 'group', 'dist'] with open(filename, 'rU') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter) csvfile.seek(0) reader = csv.DictReader( csvfile, fieldnames=fieldnames, dialect=dialect) for row in reader: if row['name'].strip().startswith('#'): pass else: num_vars += 1 names.append(row['name']) bounds.append( [float(row['lower_bound']), float(row['upper_bound'])]) # If the fourth column does not contain a group name, use # the parameter name if row['group'] is None: groups.append(row['name']) elif row['group'] is 'NA': groups.append(row['name']) else: groups.append(row['group']) # If the fifth column does not contain a distribution # use uniform if row['dist'] is None: dists.append('unif') else: dists.append(row['dist']) if groups == names: groups = None elif len(set(groups)) == 1: raise ValueError('''Only one group defined, results will not be meaningful''') # setting dists to none if all are uniform # because non-uniform scaling is not needed if all([d == 'unif' for d in dists]): dists = None return {'names': names, 'bounds': bounds, 'num_vars': num_vars, 'groups': groups, 'dists': 
dists}
python
def read_param_file(filename, delimiter=None): """Unpacks a parameter file into a dictionary Reads a parameter file of format:: Param1,0,1,Group1,dist1 Param2,0,1,Group2,dist2 Param3,0,1,Group3,dist3 (Group and Dist columns are optional) Returns a dictionary containing: - names - the names of the parameters - bounds - a list of lists of lower and upper bounds - num_vars - a scalar indicating the number of variables (the length of names) - groups - a list of group names (strings) for each variable - dists - a list of distributions for the problem, None if not specified or all uniform Arguments --------- filename : str The path to the parameter file delimiter : str, default=None The delimiter used in the file to distinguish between columns """ names = [] bounds = [] groups = [] dists = [] num_vars = 0 fieldnames = ['name', 'lower_bound', 'upper_bound', 'group', 'dist'] with open(filename, 'rU') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter) csvfile.seek(0) reader = csv.DictReader( csvfile, fieldnames=fieldnames, dialect=dialect) for row in reader: if row['name'].strip().startswith('#'): pass else: num_vars += 1 names.append(row['name']) bounds.append( [float(row['lower_bound']), float(row['upper_bound'])]) # If the fourth column does not contain a group name, use # the parameter name if row['group'] is None: groups.append(row['name']) elif row['group'] is 'NA': groups.append(row['name']) else: groups.append(row['group']) # If the fifth column does not contain a distribution # use uniform if row['dist'] is None: dists.append('unif') else: dists.append(row['dist']) if groups == names: groups = None elif len(set(groups)) == 1: raise ValueError('''Only one group defined, results will not be meaningful''') # setting dists to none if all are uniform # because non-uniform scaling is not needed if all([d == 'unif' for d in dists]): dists = None return {'names': names, 'bounds': bounds, 'num_vars': num_vars, 'groups': groups, 'dists': 
dists}
[ "def", "read_param_file", "(", "filename", ",", "delimiter", "=", "None", ")", ":", "names", "=", "[", "]", "bounds", "=", "[", "]", "groups", "=", "[", "]", "dists", "=", "[", "]", "num_vars", "=", "0", "fieldnames", "=", "[", "'name'", ",", "'low...
Unpacks a parameter file into a dictionary Reads a parameter file of format:: Param1,0,1,Group1,dist1 Param2,0,1,Group2,dist2 Param3,0,1,Group3,dist3 (Group and Dist columns are optional) Returns a dictionary containing: - names - the names of the parameters - bounds - a list of lists of lower and upper bounds - num_vars - a scalar indicating the number of variables (the length of names) - groups - a list of group names (strings) for each variable - dists - a list of distributions for the problem, None if not specified or all uniform Arguments --------- filename : str The path to the parameter file delimiter : str, default=None The delimiter used in the file to distinguish between columns
[ "Unpacks", "a", "parameter", "file", "into", "a", "dictionary" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L168-L245
train
222,026
SALib/SALib
src/SALib/util/__init__.py
compute_groups_matrix
def compute_groups_matrix(groups): """Generate matrix which notes factor membership of groups Computes a k-by-g matrix which notes factor membership of groups where: k is the number of variables (factors) g is the number of groups Also returns a g-length list of unique group_names whose positions correspond to the order of groups in the k-by-g matrix Arguments --------- groups : list Group names corresponding to each variable Returns ------- tuple containing group matrix assigning parameters to groups and a list of unique group names """ if not groups: return None num_vars = len(groups) # Get a unique set of the group names unique_group_names = list(OrderedDict.fromkeys(groups)) number_of_groups = len(unique_group_names) indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)]) output = np.zeros((num_vars, number_of_groups), dtype=np.int) for parameter_row, group_membership in enumerate(groups): group_index = indices[group_membership] output[parameter_row, group_index] = 1 return output, unique_group_names
python
def compute_groups_matrix(groups): """Generate matrix which notes factor membership of groups Computes a k-by-g matrix which notes factor membership of groups where: k is the number of variables (factors) g is the number of groups Also returns a g-length list of unique group_names whose positions correspond to the order of groups in the k-by-g matrix Arguments --------- groups : list Group names corresponding to each variable Returns ------- tuple containing group matrix assigning parameters to groups and a list of unique group names """ if not groups: return None num_vars = len(groups) # Get a unique set of the group names unique_group_names = list(OrderedDict.fromkeys(groups)) number_of_groups = len(unique_group_names) indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)]) output = np.zeros((num_vars, number_of_groups), dtype=np.int) for parameter_row, group_membership in enumerate(groups): group_index = indices[group_membership] output[parameter_row, group_index] = 1 return output, unique_group_names
[ "def", "compute_groups_matrix", "(", "groups", ")", ":", "if", "not", "groups", ":", "return", "None", "num_vars", "=", "len", "(", "groups", ")", "# Get a unique set of the group names", "unique_group_names", "=", "list", "(", "OrderedDict", ".", "fromkeys", "(",...
Generate matrix which notes factor membership of groups Computes a k-by-g matrix which notes factor membership of groups where: k is the number of variables (factors) g is the number of groups Also returns a g-length list of unique group_names whose positions correspond to the order of groups in the k-by-g matrix Arguments --------- groups : list Group names corresponding to each variable Returns ------- tuple containing group matrix assigning parameters to groups and a list of unique group names
[ "Generate", "matrix", "which", "notes", "factor", "membership", "of", "groups" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L248-L286
train
222,027
SALib/SALib
src/SALib/util/__init__.py
requires_gurobipy
def requires_gurobipy(_has_gurobi): ''' Decorator function which takes a boolean _has_gurobi as an argument. Use decorate any functions which require gurobi. Raises an import error at runtime if gurobi is not present. Note that all runtime errors should be avoided in the working code, using brute force options as preference. ''' def _outer_wrapper(wrapped_function): def _wrapper(*args, **kwargs): if _has_gurobi: result = wrapped_function(*args, **kwargs) else: warn("Gurobi not available", ImportWarning) result = None return result return _wrapper return _outer_wrapper
python
def requires_gurobipy(_has_gurobi): ''' Decorator function which takes a boolean _has_gurobi as an argument. Use decorate any functions which require gurobi. Raises an import error at runtime if gurobi is not present. Note that all runtime errors should be avoided in the working code, using brute force options as preference. ''' def _outer_wrapper(wrapped_function): def _wrapper(*args, **kwargs): if _has_gurobi: result = wrapped_function(*args, **kwargs) else: warn("Gurobi not available", ImportWarning) result = None return result return _wrapper return _outer_wrapper
[ "def", "requires_gurobipy", "(", "_has_gurobi", ")", ":", "def", "_outer_wrapper", "(", "wrapped_function", ")", ":", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "_has_gurobi", ":", "result", "=", "wrapped_function", "(", ...
Decorator function which takes a boolean _has_gurobi as an argument. Use decorate any functions which require gurobi. Raises an import error at runtime if gurobi is not present. Note that all runtime errors should be avoided in the working code, using brute force options as preference.
[ "Decorator", "function", "which", "takes", "a", "boolean", "_has_gurobi", "as", "an", "argument", ".", "Use", "decorate", "any", "functions", "which", "require", "gurobi", ".", "Raises", "an", "import", "error", "at", "runtime", "if", "gurobi", "is", "not", ...
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L289-L306
train
222,028
SALib/SALib
src/SALib/analyze/morris.py
compute_grouped_sigma
def compute_grouped_sigma(ungrouped_sigma, group_matrix): ''' Returns sigma for the groups of parameter values in the argument ungrouped_metric where the group consists of no more than one parameter ''' group_matrix = np.array(group_matrix, dtype=np.bool) sigma_masked = np.ma.masked_array(ungrouped_sigma * group_matrix.T, mask=(group_matrix ^ 1).T) sigma_agg = np.ma.mean(sigma_masked, axis=1) sigma = np.zeros(group_matrix.shape[1], dtype=np.float) np.copyto(sigma, sigma_agg, where=group_matrix.sum(axis=0) == 1) np.copyto(sigma, np.NAN, where=group_matrix.sum(axis=0) != 1) return sigma
python
def compute_grouped_sigma(ungrouped_sigma, group_matrix): ''' Returns sigma for the groups of parameter values in the argument ungrouped_metric where the group consists of no more than one parameter ''' group_matrix = np.array(group_matrix, dtype=np.bool) sigma_masked = np.ma.masked_array(ungrouped_sigma * group_matrix.T, mask=(group_matrix ^ 1).T) sigma_agg = np.ma.mean(sigma_masked, axis=1) sigma = np.zeros(group_matrix.shape[1], dtype=np.float) np.copyto(sigma, sigma_agg, where=group_matrix.sum(axis=0) == 1) np.copyto(sigma, np.NAN, where=group_matrix.sum(axis=0) != 1) return sigma
[ "def", "compute_grouped_sigma", "(", "ungrouped_sigma", ",", "group_matrix", ")", ":", "group_matrix", "=", "np", ".", "array", "(", "group_matrix", ",", "dtype", "=", "np", ".", "bool", ")", "sigma_masked", "=", "np", ".", "ma", ".", "masked_array", "(", ...
Returns sigma for the groups of parameter values in the argument ungrouped_metric where the group consists of no more than one parameter
[ "Returns", "sigma", "for", "the", "groups", "of", "parameter", "values", "in", "the", "argument", "ungrouped_metric", "where", "the", "group", "consists", "of", "no", "more", "than", "one", "parameter" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/morris.py#L170-L186
train
222,029
SALib/SALib
src/SALib/analyze/morris.py
compute_grouped_metric
def compute_grouped_metric(ungrouped_metric, group_matrix): ''' Computes the mean value for the groups of parameter values in the argument ungrouped_metric ''' group_matrix = np.array(group_matrix, dtype=np.bool) mu_star_masked = np.ma.masked_array(ungrouped_metric * group_matrix.T, mask=(group_matrix ^ 1).T) mean_of_mu_star = np.ma.mean(mu_star_masked, axis=1) return mean_of_mu_star
python
def compute_grouped_metric(ungrouped_metric, group_matrix): ''' Computes the mean value for the groups of parameter values in the argument ungrouped_metric ''' group_matrix = np.array(group_matrix, dtype=np.bool) mu_star_masked = np.ma.masked_array(ungrouped_metric * group_matrix.T, mask=(group_matrix ^ 1).T) mean_of_mu_star = np.ma.mean(mu_star_masked, axis=1) return mean_of_mu_star
[ "def", "compute_grouped_metric", "(", "ungrouped_metric", ",", "group_matrix", ")", ":", "group_matrix", "=", "np", ".", "array", "(", "group_matrix", ",", "dtype", "=", "np", ".", "bool", ")", "mu_star_masked", "=", "np", ".", "ma", ".", "masked_array", "("...
Computes the mean value for the groups of parameter values in the argument ungrouped_metric
[ "Computes", "the", "mean", "value", "for", "the", "groups", "of", "parameter", "values", "in", "the", "argument", "ungrouped_metric" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/morris.py#L189-L201
train
222,030
SALib/SALib
src/SALib/analyze/morris.py
compute_mu_star_confidence
def compute_mu_star_confidence(ee, num_trajectories, num_resamples, conf_level): ''' Uses bootstrapping where the elementary effects are resampled with replacement to produce a histogram of resampled mu_star metrics. This resample is used to produce a confidence interval. ''' ee_resampled = np.zeros([num_trajectories]) mu_star_resampled = np.zeros([num_resamples]) if not 0 < conf_level < 1: raise ValueError("Confidence level must be between 0-1.") resample_index = np.random.randint( len(ee), size=(num_resamples, num_trajectories)) ee_resampled = ee[resample_index] # Compute average of the absolute values over each of the resamples mu_star_resampled = np.average(np.abs(ee_resampled), axis=1) return norm.ppf(0.5 + conf_level / 2) * mu_star_resampled.std(ddof=1)
python
def compute_mu_star_confidence(ee, num_trajectories, num_resamples, conf_level): ''' Uses bootstrapping where the elementary effects are resampled with replacement to produce a histogram of resampled mu_star metrics. This resample is used to produce a confidence interval. ''' ee_resampled = np.zeros([num_trajectories]) mu_star_resampled = np.zeros([num_resamples]) if not 0 < conf_level < 1: raise ValueError("Confidence level must be between 0-1.") resample_index = np.random.randint( len(ee), size=(num_resamples, num_trajectories)) ee_resampled = ee[resample_index] # Compute average of the absolute values over each of the resamples mu_star_resampled = np.average(np.abs(ee_resampled), axis=1) return norm.ppf(0.5 + conf_level / 2) * mu_star_resampled.std(ddof=1)
[ "def", "compute_mu_star_confidence", "(", "ee", ",", "num_trajectories", ",", "num_resamples", ",", "conf_level", ")", ":", "ee_resampled", "=", "np", ".", "zeros", "(", "[", "num_trajectories", "]", ")", "mu_star_resampled", "=", "np", ".", "zeros", "(", "[",...
Uses bootstrapping where the elementary effects are resampled with replacement to produce a histogram of resampled mu_star metrics. This resample is used to produce a confidence interval.
[ "Uses", "bootstrapping", "where", "the", "elementary", "effects", "are", "resampled", "with", "replacement", "to", "produce", "a", "histogram", "of", "resampled", "mu_star", "metrics", ".", "This", "resample", "is", "used", "to", "produce", "a", "confidence", "i...
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/morris.py#L261-L280
train
222,031
SALib/SALib
src/SALib/sample/morris/gurobi.py
timestamp
def timestamp(num_params, p_levels, k_choices, N): """ Returns a uniform timestamp with parameter values for file identification """ string = "_v%s_l%s_gs%s_k%s_N%s_%s.txt" % (num_params, p_levels, k_choices, N, dt.strftime(dt.now(), "%d%m%y%H%M%S")) return string
python
def timestamp(num_params, p_levels, k_choices, N): """ Returns a uniform timestamp with parameter values for file identification """ string = "_v%s_l%s_gs%s_k%s_N%s_%s.txt" % (num_params, p_levels, k_choices, N, dt.strftime(dt.now(), "%d%m%y%H%M%S")) return string
[ "def", "timestamp", "(", "num_params", ",", "p_levels", ",", "k_choices", ",", "N", ")", ":", "string", "=", "\"_v%s_l%s_gs%s_k%s_N%s_%s.txt\"", "%", "(", "num_params", ",", "p_levels", ",", "k_choices", ",", "N", ",", "dt", ".", "strftime", "(", "dt", "."...
Returns a uniform timestamp with parameter values for file identification
[ "Returns", "a", "uniform", "timestamp", "with", "parameter", "values", "for", "file", "identification" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/gurobi.py#L131-L141
train
222,032
SALib/SALib
src/SALib/sample/morris/brute.py
BruteForce.brute_force_most_distant
def brute_force_most_distant(self, input_sample, num_samples, num_params, k_choices, num_groups=None): """Use brute force method to find most distant trajectories Arguments --------- input_sample : numpy.ndarray num_samples : int The number of samples to generate num_params : int The number of parameters k_choices : int The number of optimal trajectories num_groups : int, default=None The number of groups Returns ------- list """ scores = self.find_most_distant(input_sample, num_samples, num_params, k_choices, num_groups) maximum_combo = self.find_maximum(scores, num_samples, k_choices) return maximum_combo
python
def brute_force_most_distant(self, input_sample, num_samples, num_params, k_choices, num_groups=None): """Use brute force method to find most distant trajectories Arguments --------- input_sample : numpy.ndarray num_samples : int The number of samples to generate num_params : int The number of parameters k_choices : int The number of optimal trajectories num_groups : int, default=None The number of groups Returns ------- list """ scores = self.find_most_distant(input_sample, num_samples, num_params, k_choices, num_groups) maximum_combo = self.find_maximum(scores, num_samples, k_choices) return maximum_combo
[ "def", "brute_force_most_distant", "(", "self", ",", "input_sample", ",", "num_samples", ",", "num_params", ",", "k_choices", ",", "num_groups", "=", "None", ")", ":", "scores", "=", "self", ".", "find_most_distant", "(", "input_sample", ",", "num_samples", ",",...
Use brute force method to find most distant trajectories Arguments --------- input_sample : numpy.ndarray num_samples : int The number of samples to generate num_params : int The number of parameters k_choices : int The number of optimal trajectories num_groups : int, default=None The number of groups Returns ------- list
[ "Use", "brute", "force", "method", "to", "find", "most", "distant", "trajectories" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/brute.py#L19-L48
train
222,033
SALib/SALib
src/SALib/sample/morris/brute.py
BruteForce.find_most_distant
def find_most_distant(self, input_sample, num_samples, num_params, k_choices, num_groups=None): """ Finds the 'k_choices' most distant choices from the 'num_samples' trajectories contained in 'input_sample' Arguments --------- input_sample : numpy.ndarray num_samples : int The number of samples to generate num_params : int The number of parameters k_choices : int The number of optimal trajectories num_groups : int, default=None The number of groups Returns ------- numpy.ndarray """ # Now evaluate the (N choose k_choices) possible combinations if nchoosek(num_samples, k_choices) >= sys.maxsize: raise ValueError("Number of combinations is too large") number_of_combinations = int(nchoosek(num_samples, k_choices)) # First compute the distance matrix for each possible pairing # of trajectories and store in a shared-memory array distance_matrix = self.compute_distance_matrix(input_sample, num_samples, num_params, num_groups) # Initialise the output array chunk = int(1e6) if chunk > number_of_combinations: chunk = number_of_combinations counter = 0 # Generate a list of all the possible combinations combo_gen = combinations(list(range(num_samples)), k_choices) scores = np.zeros(number_of_combinations, dtype=np.float32) # Generate the pairwise indices once pairwise = np.array( [y for y in combinations(list(range(k_choices)), 2)]) for combos in self.grouper(chunk, combo_gen): scores[(counter * chunk):((counter + 1) * chunk)] \ = self.mappable(combos, pairwise, distance_matrix) counter += 1 return scores
python
def find_most_distant(self, input_sample, num_samples, num_params, k_choices, num_groups=None): """ Finds the 'k_choices' most distant choices from the 'num_samples' trajectories contained in 'input_sample' Arguments --------- input_sample : numpy.ndarray num_samples : int The number of samples to generate num_params : int The number of parameters k_choices : int The number of optimal trajectories num_groups : int, default=None The number of groups Returns ------- numpy.ndarray """ # Now evaluate the (N choose k_choices) possible combinations if nchoosek(num_samples, k_choices) >= sys.maxsize: raise ValueError("Number of combinations is too large") number_of_combinations = int(nchoosek(num_samples, k_choices)) # First compute the distance matrix for each possible pairing # of trajectories and store in a shared-memory array distance_matrix = self.compute_distance_matrix(input_sample, num_samples, num_params, num_groups) # Initialise the output array chunk = int(1e6) if chunk > number_of_combinations: chunk = number_of_combinations counter = 0 # Generate a list of all the possible combinations combo_gen = combinations(list(range(num_samples)), k_choices) scores = np.zeros(number_of_combinations, dtype=np.float32) # Generate the pairwise indices once pairwise = np.array( [y for y in combinations(list(range(k_choices)), 2)]) for combos in self.grouper(chunk, combo_gen): scores[(counter * chunk):((counter + 1) * chunk)] \ = self.mappable(combos, pairwise, distance_matrix) counter += 1 return scores
[ "def", "find_most_distant", "(", "self", ",", "input_sample", ",", "num_samples", ",", "num_params", ",", "k_choices", ",", "num_groups", "=", "None", ")", ":", "# Now evaluate the (N choose k_choices) possible combinations", "if", "nchoosek", "(", "num_samples", ",", ...
Finds the 'k_choices' most distant choices from the 'num_samples' trajectories contained in 'input_sample' Arguments --------- input_sample : numpy.ndarray num_samples : int The number of samples to generate num_params : int The number of parameters k_choices : int The number of optimal trajectories num_groups : int, default=None The number of groups Returns ------- numpy.ndarray
[ "Finds", "the", "k_choices", "most", "distant", "choices", "from", "the", "num_samples", "trajectories", "contained", "in", "input_sample" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/brute.py#L50-L101
train
222,034
SALib/SALib
src/SALib/sample/morris/brute.py
BruteForce.mappable
def mappable(combos, pairwise, distance_matrix): ''' Obtains scores from the distance_matrix for each pairwise combination held in the combos array Arguments ---------- combos : numpy.ndarray pairwise : numpy.ndarray distance_matrix : numpy.ndarray ''' combos = np.array(combos) # Create a list of all pairwise combination for each combo in combos combo_list = combos[:, pairwise[:, ]] addresses = tuple([combo_list[:, :, 1], combo_list[:, :, 0]]) all_distances = distance_matrix[addresses] new_scores = np.sqrt( np.einsum('ij,ij->i', all_distances, all_distances)) return new_scores
python
def mappable(combos, pairwise, distance_matrix): ''' Obtains scores from the distance_matrix for each pairwise combination held in the combos array Arguments ---------- combos : numpy.ndarray pairwise : numpy.ndarray distance_matrix : numpy.ndarray ''' combos = np.array(combos) # Create a list of all pairwise combination for each combo in combos combo_list = combos[:, pairwise[:, ]] addresses = tuple([combo_list[:, :, 1], combo_list[:, :, 0]]) all_distances = distance_matrix[addresses] new_scores = np.sqrt( np.einsum('ij,ij->i', all_distances, all_distances)) return new_scores
[ "def", "mappable", "(", "combos", ",", "pairwise", ",", "distance_matrix", ")", ":", "combos", "=", "np", ".", "array", "(", "combos", ")", "# Create a list of all pairwise combination for each combo in combos", "combo_list", "=", "combos", "[", ":", ",", "pairwise"...
Obtains scores from the distance_matrix for each pairwise combination held in the combos array Arguments ---------- combos : numpy.ndarray pairwise : numpy.ndarray distance_matrix : numpy.ndarray
[ "Obtains", "scores", "from", "the", "distance_matrix", "for", "each", "pairwise", "combination", "held", "in", "the", "combos", "array" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/brute.py#L113-L133
train
222,035
SALib/SALib
src/SALib/sample/morris/brute.py
BruteForce.find_maximum
def find_maximum(self, scores, N, k_choices): """Finds the `k_choices` maximum scores from `scores` Arguments --------- scores : numpy.ndarray N : int k_choices : int Returns ------- list """ if not isinstance(scores, np.ndarray): raise TypeError("Scores input is not a numpy array") index_of_maximum = int(scores.argmax()) maximum_combo = self.nth(combinations( list(range(N)), k_choices), index_of_maximum, None) return sorted(maximum_combo)
python
def find_maximum(self, scores, N, k_choices): """Finds the `k_choices` maximum scores from `scores` Arguments --------- scores : numpy.ndarray N : int k_choices : int Returns ------- list """ if not isinstance(scores, np.ndarray): raise TypeError("Scores input is not a numpy array") index_of_maximum = int(scores.argmax()) maximum_combo = self.nth(combinations( list(range(N)), k_choices), index_of_maximum, None) return sorted(maximum_combo)
[ "def", "find_maximum", "(", "self", ",", "scores", ",", "N", ",", "k_choices", ")", ":", "if", "not", "isinstance", "(", "scores", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"Scores input is not a numpy array\"", ")", "index_of_maximum",...
Finds the `k_choices` maximum scores from `scores` Arguments --------- scores : numpy.ndarray N : int k_choices : int Returns ------- list
[ "Finds", "the", "k_choices", "maximum", "scores", "from", "scores" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/brute.py#L135-L154
train
222,036
SALib/SALib
src/SALib/sample/morris/brute.py
BruteForce.nth
def nth(iterable, n, default=None): """Returns the nth item or a default value Arguments --------- iterable : iterable n : int default : default=None The default value to return """ if type(n) != int: raise TypeError("n is not an integer") return next(islice(iterable, n, None), default)
python
def nth(iterable, n, default=None): """Returns the nth item or a default value Arguments --------- iterable : iterable n : int default : default=None The default value to return """ if type(n) != int: raise TypeError("n is not an integer") return next(islice(iterable, n, None), default)
[ "def", "nth", "(", "iterable", ",", "n", ",", "default", "=", "None", ")", ":", "if", "type", "(", "n", ")", "!=", "int", ":", "raise", "TypeError", "(", "\"n is not an integer\"", ")", "return", "next", "(", "islice", "(", "iterable", ",", "n", ",",...
Returns the nth item or a default value Arguments --------- iterable : iterable n : int default : default=None The default value to return
[ "Returns", "the", "nth", "item", "or", "a", "default", "value" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/brute.py#L157-L171
train
222,037
SALib/SALib
src/SALib/sample/morris/__init__.py
sample
def sample(problem, N, num_levels=4, optimal_trajectories=None, local_optimization=True): """Generate model inputs using the Method of Morris Returns a NumPy matrix containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D` columns, where :math:`D` is the number of parameters, :math:`G` is the number of groups (if no groups are selected, the number of parameters). :math:`T` is the number of trajectories :math:`N`, or `optimal_trajectories` if selected. These model inputs are intended to be used with :func:`SALib.analyze.morris.analyze`. Parameters ---------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels optimal_trajectories : int The number of optimal trajectories to sample (between 2 and N) local_optimization : bool, default=True Flag whether to use local optimization according to Ruano et al. (2012) Speeds up the process tremendously for bigger N and num_levels. If set to ``False`` brute force method is used, unless ``gurobipy`` is available Returns ------- sample : numpy.ndarray Returns a numpy.ndarray containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and :math:`D` columns, where :math:`D` is the number of parameters. """ if problem.get('groups'): sample = _sample_groups(problem, N, num_levels) else: sample = _sample_oat(problem, N, num_levels) if optimal_trajectories: sample = _compute_optimised_trajectories(problem, sample, N, optimal_trajectories, local_optimization) scale_samples(sample, problem['bounds']) return sample
python
def sample(problem, N, num_levels=4, optimal_trajectories=None, local_optimization=True): """Generate model inputs using the Method of Morris Returns a NumPy matrix containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D` columns, where :math:`D` is the number of parameters, :math:`G` is the number of groups (if no groups are selected, the number of parameters). :math:`T` is the number of trajectories :math:`N`, or `optimal_trajectories` if selected. These model inputs are intended to be used with :func:`SALib.analyze.morris.analyze`. Parameters ---------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels optimal_trajectories : int The number of optimal trajectories to sample (between 2 and N) local_optimization : bool, default=True Flag whether to use local optimization according to Ruano et al. (2012) Speeds up the process tremendously for bigger N and num_levels. If set to ``False`` brute force method is used, unless ``gurobipy`` is available Returns ------- sample : numpy.ndarray Returns a numpy.ndarray containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and :math:`D` columns, where :math:`D` is the number of parameters. """ if problem.get('groups'): sample = _sample_groups(problem, N, num_levels) else: sample = _sample_oat(problem, N, num_levels) if optimal_trajectories: sample = _compute_optimised_trajectories(problem, sample, N, optimal_trajectories, local_optimization) scale_samples(sample, problem['bounds']) return sample
[ "def", "sample", "(", "problem", ",", "N", ",", "num_levels", "=", "4", ",", "optimal_trajectories", "=", "None", ",", "local_optimization", "=", "True", ")", ":", "if", "problem", ".", "get", "(", "'groups'", ")", ":", "sample", "=", "_sample_groups", "...
Generate model inputs using the Method of Morris Returns a NumPy matrix containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D` columns, where :math:`D` is the number of parameters, :math:`G` is the number of groups (if no groups are selected, the number of parameters). :math:`T` is the number of trajectories :math:`N`, or `optimal_trajectories` if selected. These model inputs are intended to be used with :func:`SALib.analyze.morris.analyze`. Parameters ---------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels optimal_trajectories : int The number of optimal trajectories to sample (between 2 and N) local_optimization : bool, default=True Flag whether to use local optimization according to Ruano et al. (2012) Speeds up the process tremendously for bigger N and num_levels. If set to ``False`` brute force method is used, unless ``gurobipy`` is available Returns ------- sample : numpy.ndarray Returns a numpy.ndarray containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and :math:`D` columns, where :math:`D` is the number of parameters.
[ "Generate", "model", "inputs", "using", "the", "Method", "of", "Morris" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/__init__.py#L53-L103
train
222,038
SALib/SALib
src/SALib/sample/morris/__init__.py
_sample_oat
def _sample_oat(problem, N, num_levels=4): """Generate trajectories without groups Arguments --------- problem : dict The problem definition N : int The number of samples to generate num_levels : int, default=4 The number of grid levels """ group_membership = np.asmatrix(np.identity(problem['num_vars'], dtype=int)) num_params = group_membership.shape[0] sample = np.zeros((N * (num_params + 1), num_params)) sample = np.array([generate_trajectory(group_membership, num_levels) for n in range(N)]) return sample.reshape((N * (num_params + 1), num_params))
python
def _sample_oat(problem, N, num_levels=4): """Generate trajectories without groups Arguments --------- problem : dict The problem definition N : int The number of samples to generate num_levels : int, default=4 The number of grid levels """ group_membership = np.asmatrix(np.identity(problem['num_vars'], dtype=int)) num_params = group_membership.shape[0] sample = np.zeros((N * (num_params + 1), num_params)) sample = np.array([generate_trajectory(group_membership, num_levels) for n in range(N)]) return sample.reshape((N * (num_params + 1), num_params))
[ "def", "_sample_oat", "(", "problem", ",", "N", ",", "num_levels", "=", "4", ")", ":", "group_membership", "=", "np", ".", "asmatrix", "(", "np", ".", "identity", "(", "problem", "[", "'num_vars'", "]", ",", "dtype", "=", "int", ")", ")", "num_params",...
Generate trajectories without groups Arguments --------- problem : dict The problem definition N : int The number of samples to generate num_levels : int, default=4 The number of grid levels
[ "Generate", "trajectories", "without", "groups" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/__init__.py#L106-L126
train
222,039
SALib/SALib
src/SALib/sample/morris/__init__.py
_sample_groups
def _sample_groups(problem, N, num_levels=4): """Generate trajectories for groups Returns an :math:`N(g+1)`-by-:math:`k` array of `N` trajectories, where :math:`g` is the number of groups and :math:`k` is the number of factors Arguments --------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels Returns ------- numpy.ndarray """ if len(problem['groups']) != problem['num_vars']: raise ValueError("Groups do not match to number of variables") group_membership, _ = compute_groups_matrix(problem['groups']) if group_membership is None: raise ValueError("Please define the 'group_membership' matrix") if not isinstance(group_membership, np.ndarray): raise TypeError("Argument 'group_membership' should be formatted \ as a numpy ndarray") num_params = group_membership.shape[0] num_groups = group_membership.shape[1] sample = np.zeros((N * (num_groups + 1), num_params)) sample = np.array([generate_trajectory(group_membership, num_levels) for n in range(N)]) return sample.reshape((N * (num_groups + 1), num_params))
python
def _sample_groups(problem, N, num_levels=4): """Generate trajectories for groups Returns an :math:`N(g+1)`-by-:math:`k` array of `N` trajectories, where :math:`g` is the number of groups and :math:`k` is the number of factors Arguments --------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels Returns ------- numpy.ndarray """ if len(problem['groups']) != problem['num_vars']: raise ValueError("Groups do not match to number of variables") group_membership, _ = compute_groups_matrix(problem['groups']) if group_membership is None: raise ValueError("Please define the 'group_membership' matrix") if not isinstance(group_membership, np.ndarray): raise TypeError("Argument 'group_membership' should be formatted \ as a numpy ndarray") num_params = group_membership.shape[0] num_groups = group_membership.shape[1] sample = np.zeros((N * (num_groups + 1), num_params)) sample = np.array([generate_trajectory(group_membership, num_levels) for n in range(N)]) return sample.reshape((N * (num_groups + 1), num_params))
[ "def", "_sample_groups", "(", "problem", ",", "N", ",", "num_levels", "=", "4", ")", ":", "if", "len", "(", "problem", "[", "'groups'", "]", ")", "!=", "problem", "[", "'num_vars'", "]", ":", "raise", "ValueError", "(", "\"Groups do not match to number of va...
Generate trajectories for groups Returns an :math:`N(g+1)`-by-:math:`k` array of `N` trajectories, where :math:`g` is the number of groups and :math:`k` is the number of factors Arguments --------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels Returns ------- numpy.ndarray
[ "Generate", "trajectories", "for", "groups" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/__init__.py#L129-L166
train
222,040
SALib/SALib
src/SALib/sample/morris/__init__.py
generate_trajectory
def generate_trajectory(group_membership, num_levels=4): """Return a single trajectory Return a single trajectory of size :math:`(g+1)`-by-:math:`k` where :math:`g` is the number of groups, and :math:`k` is the number of factors, both implied by the dimensions of `group_membership` Arguments --------- group_membership : np.ndarray a k-by-g matrix which notes factor membership of groups num_levels : int, default=4 The number of levels in the grid Returns ------- np.ndarray """ delta = compute_delta(num_levels) # Infer number of groups `g` and number of params `k` from # `group_membership` matrix num_params = group_membership.shape[0] num_groups = group_membership.shape[1] # Matrix B - size (g + 1) * g - lower triangular matrix B = np.tril(np.ones([num_groups + 1, num_groups], dtype=int), -1) P_star = generate_p_star(num_groups) # Matrix J - a (g+1)-by-num_params matrix of ones J = np.ones((num_groups + 1, num_params)) # Matrix D* - num_params-by-num_params matrix which decribes whether # factors move up or down D_star = np.diag([rd.choice([-1, 1]) for _ in range(num_params)]) x_star = generate_x_star(num_params, num_levels) # Matrix B* - size (num_groups + 1) * num_params B_star = compute_b_star(J, x_star, delta, B, group_membership, P_star, D_star) return B_star
python
def generate_trajectory(group_membership, num_levels=4): """Return a single trajectory Return a single trajectory of size :math:`(g+1)`-by-:math:`k` where :math:`g` is the number of groups, and :math:`k` is the number of factors, both implied by the dimensions of `group_membership` Arguments --------- group_membership : np.ndarray a k-by-g matrix which notes factor membership of groups num_levels : int, default=4 The number of levels in the grid Returns ------- np.ndarray """ delta = compute_delta(num_levels) # Infer number of groups `g` and number of params `k` from # `group_membership` matrix num_params = group_membership.shape[0] num_groups = group_membership.shape[1] # Matrix B - size (g + 1) * g - lower triangular matrix B = np.tril(np.ones([num_groups + 1, num_groups], dtype=int), -1) P_star = generate_p_star(num_groups) # Matrix J - a (g+1)-by-num_params matrix of ones J = np.ones((num_groups + 1, num_params)) # Matrix D* - num_params-by-num_params matrix which decribes whether # factors move up or down D_star = np.diag([rd.choice([-1, 1]) for _ in range(num_params)]) x_star = generate_x_star(num_params, num_levels) # Matrix B* - size (num_groups + 1) * num_params B_star = compute_b_star(J, x_star, delta, B, group_membership, P_star, D_star) return B_star
[ "def", "generate_trajectory", "(", "group_membership", ",", "num_levels", "=", "4", ")", ":", "delta", "=", "compute_delta", "(", "num_levels", ")", "# Infer number of groups `g` and number of params `k` from", "# `group_membership` matrix", "num_params", "=", "group_membersh...
Return a single trajectory Return a single trajectory of size :math:`(g+1)`-by-:math:`k` where :math:`g` is the number of groups, and :math:`k` is the number of factors, both implied by the dimensions of `group_membership` Arguments --------- group_membership : np.ndarray a k-by-g matrix which notes factor membership of groups num_levels : int, default=4 The number of levels in the grid Returns ------- np.ndarray
[ "Return", "a", "single", "trajectory" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/__init__.py#L169-L215
train
222,041
SALib/SALib
src/SALib/sample/morris/__init__.py
generate_p_star
def generate_p_star(num_groups): """Describe the order in which groups move Arguments --------- num_groups : int Returns ------- np.ndarray Matrix P* - size (g-by-g) """ p_star = np.eye(num_groups, num_groups) rd.shuffle(p_star) return p_star
python
def generate_p_star(num_groups): """Describe the order in which groups move Arguments --------- num_groups : int Returns ------- np.ndarray Matrix P* - size (g-by-g) """ p_star = np.eye(num_groups, num_groups) rd.shuffle(p_star) return p_star
[ "def", "generate_p_star", "(", "num_groups", ")", ":", "p_star", "=", "np", ".", "eye", "(", "num_groups", ",", "num_groups", ")", "rd", ".", "shuffle", "(", "p_star", ")", "return", "p_star" ]
Describe the order in which groups move Arguments --------- num_groups : int Returns ------- np.ndarray Matrix P* - size (g-by-g)
[ "Describe", "the", "order", "in", "which", "groups", "move" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/__init__.py#L230-L244
train
222,042
SALib/SALib
src/SALib/scripts/salib.py
parse_subargs
def parse_subargs(module, parser, method, opts): '''Attach argument parser for action specific options. Arguments --------- module : module name of module to extract action from parser : argparser argparser object to attach additional arguments to method : str name of method (morris, sobol, etc). Must match one of the available submodules opts : list A list of argument options to parse Returns --------- subargs : argparser namespace object ''' module.cli_args(parser) subargs = parser.parse_args(opts) return subargs
python
def parse_subargs(module, parser, method, opts): '''Attach argument parser for action specific options. Arguments --------- module : module name of module to extract action from parser : argparser argparser object to attach additional arguments to method : str name of method (morris, sobol, etc). Must match one of the available submodules opts : list A list of argument options to parse Returns --------- subargs : argparser namespace object ''' module.cli_args(parser) subargs = parser.parse_args(opts) return subargs
[ "def", "parse_subargs", "(", "module", ",", "parser", ",", "method", ",", "opts", ")", ":", "module", ".", "cli_args", "(", "parser", ")", "subargs", "=", "parser", ".", "parse_args", "(", "opts", ")", "return", "subargs" ]
Attach argument parser for action specific options. Arguments --------- module : module name of module to extract action from parser : argparser argparser object to attach additional arguments to method : str name of method (morris, sobol, etc). Must match one of the available submodules opts : list A list of argument options to parse Returns --------- subargs : argparser namespace object
[ "Attach", "argument", "parser", "for", "action", "specific", "options", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/scripts/salib.py#L8-L29
train
222,043
SALib/SALib
src/SALib/sample/morris/local.py
LocalOptimisation.find_local_maximum
def find_local_maximum(self, input_sample, N, num_params, k_choices, num_groups=None): """Find the most different trajectories in the input sample using a local approach An alternative by Ruano et al. (2012) for the brute force approach as originally proposed by Campolongo et al. (2007). The method should improve the speed with which an optimal set of trajectories is found tremendously for larger sample sizes. Arguments --------- input_sample : np.ndarray N : int The number of trajectories num_params : int The number of factors k_choices : int The number of optimal trajectories to return num_groups : int, default=None The number of groups Returns ------- list """ distance_matrix = self.compute_distance_matrix(input_sample, N, num_params, num_groups, local_optimization=True) tot_indices_list = [] tot_max_array = np.zeros(k_choices - 1) # Loop over `k_choices`, i starts at 1 for i in range(1, k_choices): indices_list = [] row_maxima_i = np.zeros(len(distance_matrix)) row_nr = 0 for row in distance_matrix: indices = tuple(row.argsort()[-i:][::-1]) + (row_nr,) row_maxima_i[row_nr] = self.sum_distances( indices, distance_matrix) indices_list.append(indices) row_nr += 1 # Find the indices belonging to the maximum distance i_max_ind = self.get_max_sum_ind(indices_list, row_maxima_i, i, 0) # Loop 'm' (called loop 'k' in Ruano) m_max_ind = i_max_ind # m starts at 1 m = 1 while m <= k_choices - i - 1: m_ind = self.add_indices(m_max_ind, distance_matrix) m_maxima = np.zeros(len(m_ind)) for n in range(0, len(m_ind)): m_maxima[n] = self.sum_distances(m_ind[n], distance_matrix) m_max_ind = self.get_max_sum_ind(m_ind, m_maxima, i, m) m += 1 tot_indices_list.append(m_max_ind) tot_max_array[i - 1] = self.sum_distances(m_max_ind, distance_matrix) tot_max = self.get_max_sum_ind( tot_indices_list, tot_max_array, "tot", "tot") return sorted(list(tot_max))
python
def find_local_maximum(self, input_sample, N, num_params, k_choices, num_groups=None): """Find the most different trajectories in the input sample using a local approach An alternative by Ruano et al. (2012) for the brute force approach as originally proposed by Campolongo et al. (2007). The method should improve the speed with which an optimal set of trajectories is found tremendously for larger sample sizes. Arguments --------- input_sample : np.ndarray N : int The number of trajectories num_params : int The number of factors k_choices : int The number of optimal trajectories to return num_groups : int, default=None The number of groups Returns ------- list """ distance_matrix = self.compute_distance_matrix(input_sample, N, num_params, num_groups, local_optimization=True) tot_indices_list = [] tot_max_array = np.zeros(k_choices - 1) # Loop over `k_choices`, i starts at 1 for i in range(1, k_choices): indices_list = [] row_maxima_i = np.zeros(len(distance_matrix)) row_nr = 0 for row in distance_matrix: indices = tuple(row.argsort()[-i:][::-1]) + (row_nr,) row_maxima_i[row_nr] = self.sum_distances( indices, distance_matrix) indices_list.append(indices) row_nr += 1 # Find the indices belonging to the maximum distance i_max_ind = self.get_max_sum_ind(indices_list, row_maxima_i, i, 0) # Loop 'm' (called loop 'k' in Ruano) m_max_ind = i_max_ind # m starts at 1 m = 1 while m <= k_choices - i - 1: m_ind = self.add_indices(m_max_ind, distance_matrix) m_maxima = np.zeros(len(m_ind)) for n in range(0, len(m_ind)): m_maxima[n] = self.sum_distances(m_ind[n], distance_matrix) m_max_ind = self.get_max_sum_ind(m_ind, m_maxima, i, m) m += 1 tot_indices_list.append(m_max_ind) tot_max_array[i - 1] = self.sum_distances(m_max_ind, distance_matrix) tot_max = self.get_max_sum_ind( tot_indices_list, tot_max_array, "tot", "tot") return sorted(list(tot_max))
[ "def", "find_local_maximum", "(", "self", ",", "input_sample", ",", "N", ",", "num_params", ",", "k_choices", ",", "num_groups", "=", "None", ")", ":", "distance_matrix", "=", "self", ".", "compute_distance_matrix", "(", "input_sample", ",", "N", ",", "num_par...
Find the most different trajectories in the input sample using a local approach An alternative by Ruano et al. (2012) for the brute force approach as originally proposed by Campolongo et al. (2007). The method should improve the speed with which an optimal set of trajectories is found tremendously for larger sample sizes. Arguments --------- input_sample : np.ndarray N : int The number of trajectories num_params : int The number of factors k_choices : int The number of optimal trajectories to return num_groups : int, default=None The number of groups Returns ------- list
[ "Find", "the", "most", "different", "trajectories", "in", "the", "input", "sample", "using", "a", "local", "approach" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/local.py#L18-L89
train
222,044
SALib/SALib
src/SALib/sample/morris/local.py
LocalOptimisation.sum_distances
def sum_distances(self, indices, distance_matrix): """Calculate combinatorial distance between a select group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function can perhaps be quickened by calculating the sum of the distances. The calculated distances, as they are right now, are only used in a relative way. Purely summing distances would lead to the same result, at a perhaps quicker rate. """ combs_tup = np.array(tuple(combinations(indices, 2))) # Put indices from tuples into two-dimensional array. combs = np.array([[i[0] for i in combs_tup], [i[1] for i in combs_tup]]) # Calculate distance (vectorized) dist = np.sqrt( np.sum(np.square(distance_matrix[combs[0], combs[1]]), axis=0)) return dist
python
def sum_distances(self, indices, distance_matrix): """Calculate combinatorial distance between a select group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function can perhaps be quickened by calculating the sum of the distances. The calculated distances, as they are right now, are only used in a relative way. Purely summing distances would lead to the same result, at a perhaps quicker rate. """ combs_tup = np.array(tuple(combinations(indices, 2))) # Put indices from tuples into two-dimensional array. combs = np.array([[i[0] for i in combs_tup], [i[1] for i in combs_tup]]) # Calculate distance (vectorized) dist = np.sqrt( np.sum(np.square(distance_matrix[combs[0], combs[1]]), axis=0)) return dist
[ "def", "sum_distances", "(", "self", ",", "indices", ",", "distance_matrix", ")", ":", "combs_tup", "=", "np", ".", "array", "(", "tuple", "(", "combinations", "(", "indices", ",", "2", ")", ")", ")", "# Put indices from tuples into two-dimensional array.", "com...
Calculate combinatorial distance between a select group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function can perhaps be quickened by calculating the sum of the distances. The calculated distances, as they are right now, are only used in a relative way. Purely summing distances would lead to the same result, at a perhaps quicker rate.
[ "Calculate", "combinatorial", "distance", "between", "a", "select", "group", "of", "trajectories", "indicated", "by", "indices" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/local.py#L91-L121
train
222,045
SALib/SALib
src/SALib/sample/morris/local.py
LocalOptimisation.get_max_sum_ind
def get_max_sum_ind(self, indices_list, distances, i, m): '''Get the indices that belong to the maximum distance in `distances` Arguments --------- indices_list : list list of tuples distances : numpy.ndarray size M i : int m : int Returns ------- list ''' if len(indices_list) != len(distances): msg = "Indices and distances are lists of different length." + \ "Length indices_list = {} and length distances = {}." + \ "In loop i = {} and m = {}" raise ValueError(msg.format( len(indices_list), len(distances), i, m)) max_index = tuple(distances.argsort()[-1:][::-1]) return indices_list[max_index[0]]
python
def get_max_sum_ind(self, indices_list, distances, i, m): '''Get the indices that belong to the maximum distance in `distances` Arguments --------- indices_list : list list of tuples distances : numpy.ndarray size M i : int m : int Returns ------- list ''' if len(indices_list) != len(distances): msg = "Indices and distances are lists of different length." + \ "Length indices_list = {} and length distances = {}." + \ "In loop i = {} and m = {}" raise ValueError(msg.format( len(indices_list), len(distances), i, m)) max_index = tuple(distances.argsort()[-1:][::-1]) return indices_list[max_index[0]]
[ "def", "get_max_sum_ind", "(", "self", ",", "indices_list", ",", "distances", ",", "i", ",", "m", ")", ":", "if", "len", "(", "indices_list", ")", "!=", "len", "(", "distances", ")", ":", "msg", "=", "\"Indices and distances are lists of different length.\"", ...
Get the indices that belong to the maximum distance in `distances` Arguments --------- indices_list : list list of tuples distances : numpy.ndarray size M i : int m : int Returns ------- list
[ "Get", "the", "indices", "that", "belong", "to", "the", "maximum", "distance", "in", "distances" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/local.py#L123-L147
train
222,046
SALib/SALib
src/SALib/sample/morris/local.py
LocalOptimisation.add_indices
def add_indices(self, indices, distance_matrix): '''Adds extra indices for the combinatorial problem. Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Example ------- >>> add_indices((1,2), numpy.array((5,5))) [(1, 2, 3), (1, 2, 4), (1, 2, 5)] ''' list_new_indices = [] for i in range(0, len(distance_matrix)): if i not in indices: list_new_indices.append(indices + (i,)) return list_new_indices
python
def add_indices(self, indices, distance_matrix): '''Adds extra indices for the combinatorial problem. Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Example ------- >>> add_indices((1,2), numpy.array((5,5))) [(1, 2, 3), (1, 2, 4), (1, 2, 5)] ''' list_new_indices = [] for i in range(0, len(distance_matrix)): if i not in indices: list_new_indices.append(indices + (i,)) return list_new_indices
[ "def", "add_indices", "(", "self", ",", "indices", ",", "distance_matrix", ")", ":", "list_new_indices", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "distance_matrix", ")", ")", ":", "if", "i", "not", "in", "indices", ":", "l...
Adds extra indices for the combinatorial problem. Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Example ------- >>> add_indices((1,2), numpy.array((5,5))) [(1, 2, 3), (1, 2, 4), (1, 2, 5)]
[ "Adds", "extra", "indices", "for", "the", "combinatorial", "problem", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/local.py#L149-L167
train
222,047
SALib/SALib
src/SALib/util/results.py
ResultDict.to_df
def to_df(self): '''Convert dict structure into Pandas DataFrame.''' return pd.DataFrame({k: v for k, v in self.items() if k is not 'names'}, index=self['names'])
python
def to_df(self): '''Convert dict structure into Pandas DataFrame.''' return pd.DataFrame({k: v for k, v in self.items() if k is not 'names'}, index=self['names'])
[ "def", "to_df", "(", "self", ")", ":", "return", "pd", ".", "DataFrame", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "if", "k", "is", "not", "'names'", "}", ",", "index", "=", "self", "[", "'names'", "...
Convert dict structure into Pandas DataFrame.
[ "Convert", "dict", "structure", "into", "Pandas", "DataFrame", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/results.py#L13-L16
train
222,048
SALib/SALib
src/SALib/plotting/morris.py
horizontal_bar_plot
def horizontal_bar_plot(ax, Si, param_dict, sortby='mu_star', unit=''): '''Updates a matplotlib axes instance with a horizontal bar plot of mu_star, with error bars representing mu_star_conf ''' assert sortby in ['mu_star', 'mu_star_conf', 'sigma', 'mu'] # Sort all the plotted elements by mu_star (or optionally another # metric) names_sorted = _sort_Si(Si, 'names', sortby) mu_star_sorted = _sort_Si(Si, 'mu_star', sortby) mu_star_conf_sorted = _sort_Si(Si, 'mu_star_conf', sortby) # Plot horizontal barchart y_pos = np.arange(len(mu_star_sorted)) plot_names = names_sorted out = ax.barh(y_pos, mu_star_sorted, xerr=mu_star_conf_sorted, align='center', ecolor='black', **param_dict) ax.set_yticks(y_pos) ax.set_yticklabels(plot_names) ax.set_xlabel(r'$\mu^\star$' + unit) ax.set_ylim(min(y_pos)-1, max(y_pos)+1) return out
python
def horizontal_bar_plot(ax, Si, param_dict, sortby='mu_star', unit=''): '''Updates a matplotlib axes instance with a horizontal bar plot of mu_star, with error bars representing mu_star_conf ''' assert sortby in ['mu_star', 'mu_star_conf', 'sigma', 'mu'] # Sort all the plotted elements by mu_star (or optionally another # metric) names_sorted = _sort_Si(Si, 'names', sortby) mu_star_sorted = _sort_Si(Si, 'mu_star', sortby) mu_star_conf_sorted = _sort_Si(Si, 'mu_star_conf', sortby) # Plot horizontal barchart y_pos = np.arange(len(mu_star_sorted)) plot_names = names_sorted out = ax.barh(y_pos, mu_star_sorted, xerr=mu_star_conf_sorted, align='center', ecolor='black', **param_dict) ax.set_yticks(y_pos) ax.set_yticklabels(plot_names) ax.set_xlabel(r'$\mu^\star$' + unit) ax.set_ylim(min(y_pos)-1, max(y_pos)+1) return out
[ "def", "horizontal_bar_plot", "(", "ax", ",", "Si", ",", "param_dict", ",", "sortby", "=", "'mu_star'", ",", "unit", "=", "''", ")", ":", "assert", "sortby", "in", "[", "'mu_star'", ",", "'mu_star_conf'", ",", "'sigma'", ",", "'mu'", "]", "# Sort all the p...
Updates a matplotlib axes instance with a horizontal bar plot of mu_star, with error bars representing mu_star_conf
[ "Updates", "a", "matplotlib", "axes", "instance", "with", "a", "horizontal", "bar", "plot" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/plotting/morris.py#L33-L64
train
222,049
SALib/SALib
src/SALib/plotting/morris.py
sample_histograms
def sample_histograms(fig, input_sample, problem, param_dict): '''Plots a set of subplots of histograms of the input sample ''' num_vars = problem['num_vars'] names = problem['names'] framing = 101 + (num_vars * 10) # Find number of levels num_levels = len(set(input_sample[:, 1])) out = [] for variable in range(num_vars): ax = fig.add_subplot(framing + variable) out.append(ax.hist(input_sample[:, variable], bins=num_levels, normed=False, label=None, **param_dict)) ax.set_title('%s' % (names[variable])) ax.tick_params(axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom='off', # ticks along the bottom edge are off top='off', # ticks along the top edge are off labelbottom='off') # labels along the bottom edge off) if variable > 0: ax.tick_params(axis='y', # changes apply to the y-axis which='both', # both major and minor ticks affected labelleft='off') # labels along the left edge off) return out
python
def sample_histograms(fig, input_sample, problem, param_dict): '''Plots a set of subplots of histograms of the input sample ''' num_vars = problem['num_vars'] names = problem['names'] framing = 101 + (num_vars * 10) # Find number of levels num_levels = len(set(input_sample[:, 1])) out = [] for variable in range(num_vars): ax = fig.add_subplot(framing + variable) out.append(ax.hist(input_sample[:, variable], bins=num_levels, normed=False, label=None, **param_dict)) ax.set_title('%s' % (names[variable])) ax.tick_params(axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom='off', # ticks along the bottom edge are off top='off', # ticks along the top edge are off labelbottom='off') # labels along the bottom edge off) if variable > 0: ax.tick_params(axis='y', # changes apply to the y-axis which='both', # both major and minor ticks affected labelleft='off') # labels along the left edge off) return out
[ "def", "sample_histograms", "(", "fig", ",", "input_sample", ",", "problem", ",", "param_dict", ")", ":", "num_vars", "=", "problem", "[", "'num_vars'", "]", "names", "=", "problem", "[", "'names'", "]", "framing", "=", "101", "+", "(", "num_vars", "*", ...
Plots a set of subplots of histograms of the input sample
[ "Plots", "a", "set", "of", "subplots", "of", "histograms", "of", "the", "input", "sample" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/plotting/morris.py#L105-L138
train
222,050
SALib/SALib
src/SALib/sample/ff.py
extend_bounds
def extend_bounds(problem): """Extends the problem bounds to the nearest power of two Arguments ========= problem : dict The problem definition """ num_vars = problem['num_vars'] num_ff_vars = 2 ** find_smallest(num_vars) num_dummy_variables = num_ff_vars - num_vars bounds = list(problem['bounds']) names = problem['names'] if num_dummy_variables > 0: bounds.extend([[0, 1] for x in range(num_dummy_variables)]) names.extend(["dummy_" + str(var) for var in range(num_dummy_variables)]) problem['bounds'] = bounds problem['names'] = names problem['num_vars'] = num_ff_vars return problem
python
def extend_bounds(problem): """Extends the problem bounds to the nearest power of two Arguments ========= problem : dict The problem definition """ num_vars = problem['num_vars'] num_ff_vars = 2 ** find_smallest(num_vars) num_dummy_variables = num_ff_vars - num_vars bounds = list(problem['bounds']) names = problem['names'] if num_dummy_variables > 0: bounds.extend([[0, 1] for x in range(num_dummy_variables)]) names.extend(["dummy_" + str(var) for var in range(num_dummy_variables)]) problem['bounds'] = bounds problem['names'] = names problem['num_vars'] = num_ff_vars return problem
[ "def", "extend_bounds", "(", "problem", ")", ":", "num_vars", "=", "problem", "[", "'num_vars'", "]", "num_ff_vars", "=", "2", "**", "find_smallest", "(", "num_vars", ")", "num_dummy_variables", "=", "num_ff_vars", "-", "num_vars", "bounds", "=", "list", "(", ...
Extends the problem bounds to the nearest power of two Arguments ========= problem : dict The problem definition
[ "Extends", "the", "problem", "bounds", "to", "the", "nearest", "power", "of", "two" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/ff.py#L33-L56
train
222,051
SALib/SALib
src/SALib/sample/ff.py
generate_contrast
def generate_contrast(problem): """Generates the raw sample from the problem file Arguments ========= problem : dict The problem definition """ num_vars = problem['num_vars'] # Find the smallest n, such that num_vars < k k = [2 ** n for n in range(16)] k_chosen = 2 ** find_smallest(num_vars) # Generate the fractional factorial contrast contrast = np.vstack([hadamard(k_chosen), -hadamard(k_chosen)]) return contrast
python
def generate_contrast(problem): """Generates the raw sample from the problem file Arguments ========= problem : dict The problem definition """ num_vars = problem['num_vars'] # Find the smallest n, such that num_vars < k k = [2 ** n for n in range(16)] k_chosen = 2 ** find_smallest(num_vars) # Generate the fractional factorial contrast contrast = np.vstack([hadamard(k_chosen), -hadamard(k_chosen)]) return contrast
[ "def", "generate_contrast", "(", "problem", ")", ":", "num_vars", "=", "problem", "[", "'num_vars'", "]", "# Find the smallest n, such that num_vars < k", "k", "=", "[", "2", "**", "n", "for", "n", "in", "range", "(", "16", ")", "]", "k_chosen", "=", "2", ...
Generates the raw sample from the problem file Arguments ========= problem : dict The problem definition
[ "Generates", "the", "raw", "sample", "from", "the", "problem", "file" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/ff.py#L59-L77
train
222,052
SALib/SALib
src/SALib/sample/ff.py
sample
def sample(problem, seed=None): """Generates model inputs using a fractional factorial sample Returns a NumPy matrix containing the model inputs required for a fractional factorial analysis. The resulting matrix has D columns, where D is smallest power of 2 that is greater than the number of parameters. These model inputs are intended to be used with :func:`SALib.analyze.ff.analyze`. The problem file is padded with a number of dummy variables called ``dummy_0`` required for this procedure. These dummy variables can be used as a check for errors in the analyze procedure. This algorithm is an implementation of that contained in [`Saltelli et al. 2008 <http://www.wiley.com/WileyCDA/WileyTitle/productCd-0470059974.html>`_] Arguments ========= problem : dict The problem definition Returns ======= sample : :class:`numpy.array` """ if seed: np.random.seed(seed) contrast = generate_contrast(problem) sample = np.array((contrast + 1.) / 2, dtype=np.float) problem = extend_bounds(problem) scale_samples(sample, problem['bounds']) return sample
python
def sample(problem, seed=None): """Generates model inputs using a fractional factorial sample Returns a NumPy matrix containing the model inputs required for a fractional factorial analysis. The resulting matrix has D columns, where D is smallest power of 2 that is greater than the number of parameters. These model inputs are intended to be used with :func:`SALib.analyze.ff.analyze`. The problem file is padded with a number of dummy variables called ``dummy_0`` required for this procedure. These dummy variables can be used as a check for errors in the analyze procedure. This algorithm is an implementation of that contained in [`Saltelli et al. 2008 <http://www.wiley.com/WileyCDA/WileyTitle/productCd-0470059974.html>`_] Arguments ========= problem : dict The problem definition Returns ======= sample : :class:`numpy.array` """ if seed: np.random.seed(seed) contrast = generate_contrast(problem) sample = np.array((contrast + 1.) / 2, dtype=np.float) problem = extend_bounds(problem) scale_samples(sample, problem['bounds']) return sample
[ "def", "sample", "(", "problem", ",", "seed", "=", "None", ")", ":", "if", "seed", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "contrast", "=", "generate_contrast", "(", "problem", ")", "sample", "=", "np", ".", "array", "(", "(", "con...
Generates model inputs using a fractional factorial sample Returns a NumPy matrix containing the model inputs required for a fractional factorial analysis. The resulting matrix has D columns, where D is smallest power of 2 that is greater than the number of parameters. These model inputs are intended to be used with :func:`SALib.analyze.ff.analyze`. The problem file is padded with a number of dummy variables called ``dummy_0`` required for this procedure. These dummy variables can be used as a check for errors in the analyze procedure. This algorithm is an implementation of that contained in [`Saltelli et al. 2008 <http://www.wiley.com/WileyCDA/WileyTitle/productCd-0470059974.html>`_] Arguments ========= problem : dict The problem definition Returns ======= sample : :class:`numpy.array`
[ "Generates", "model", "inputs", "using", "a", "fractional", "factorial", "sample" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/ff.py#L80-L113
train
222,053
SALib/SALib
src/SALib/sample/ff.py
cli_action
def cli_action(args): """Run sampling method Parameters ---------- args : argparse namespace """ problem = read_param_file(args.paramfile) param_values = sample(problem, seed=args.seed) np.savetxt(args.output, param_values, delimiter=args.delimiter, fmt='%.' + str(args.precision) + 'e')
python
def cli_action(args): """Run sampling method Parameters ---------- args : argparse namespace """ problem = read_param_file(args.paramfile) param_values = sample(problem, seed=args.seed) np.savetxt(args.output, param_values, delimiter=args.delimiter, fmt='%.' + str(args.precision) + 'e')
[ "def", "cli_action", "(", "args", ")", ":", "problem", "=", "read_param_file", "(", "args", ".", "paramfile", ")", "param_values", "=", "sample", "(", "problem", ",", "seed", "=", "args", ".", "seed", ")", "np", ".", "savetxt", "(", "args", ".", "outpu...
Run sampling method Parameters ---------- args : argparse namespace
[ "Run", "sampling", "method" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/ff.py#L135-L145
train
222,054
SALib/SALib
src/SALib/sample/common_args.py
setup
def setup(parser): """Add common sampling options to CLI parser. Parameters ---------- parser : argparse object Returns ---------- Updated argparse object """ parser.add_argument( '-p', '--paramfile', type=str, required=True, help='Parameter Range File') parser.add_argument( '-o', '--output', type=str, required=True, help='Output File') parser.add_argument( '-s', '--seed', type=int, required=False, default=None, help='Random Seed') parser.add_argument( '--delimiter', type=str, required=False, default=' ', help='Column delimiter') parser.add_argument('--precision', type=int, required=False, default=8, help='Output floating-point precision') return parser
python
def setup(parser): """Add common sampling options to CLI parser. Parameters ---------- parser : argparse object Returns ---------- Updated argparse object """ parser.add_argument( '-p', '--paramfile', type=str, required=True, help='Parameter Range File') parser.add_argument( '-o', '--output', type=str, required=True, help='Output File') parser.add_argument( '-s', '--seed', type=int, required=False, default=None, help='Random Seed') parser.add_argument( '--delimiter', type=str, required=False, default=' ', help='Column delimiter') parser.add_argument('--precision', type=int, required=False, default=8, help='Output floating-point precision') return parser
[ "def", "setup", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-p'", ",", "'--paramfile'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'Parameter Range File'", ")", "parser", ".", "add_argument", "(", "'-o'", ...
Add common sampling options to CLI parser. Parameters ---------- parser : argparse object Returns ---------- Updated argparse object
[ "Add", "common", "sampling", "options", "to", "CLI", "parser", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/common_args.py#L4-L29
train
222,055
SALib/SALib
src/SALib/sample/common_args.py
run_cli
def run_cli(cli_parser, run_sample, known_args=None): """Run sampling with CLI arguments. Parameters ---------- cli_parser : function Function to add method specific arguments to parser run_sample: function Method specific function that runs the sampling known_args: list [optional] Additional arguments to parse Returns ---------- argparse object """ parser = create(cli_parser) args = parser.parse_args(known_args) run_sample(args)
python
def run_cli(cli_parser, run_sample, known_args=None): """Run sampling with CLI arguments. Parameters ---------- cli_parser : function Function to add method specific arguments to parser run_sample: function Method specific function that runs the sampling known_args: list [optional] Additional arguments to parse Returns ---------- argparse object """ parser = create(cli_parser) args = parser.parse_args(known_args) run_sample(args)
[ "def", "run_cli", "(", "cli_parser", ",", "run_sample", ",", "known_args", "=", "None", ")", ":", "parser", "=", "create", "(", "cli_parser", ")", "args", "=", "parser", ".", "parse_args", "(", "known_args", ")", "run_sample", "(", "args", ")" ]
Run sampling with CLI arguments. Parameters ---------- cli_parser : function Function to add method specific arguments to parser run_sample: function Method specific function that runs the sampling known_args: list [optional] Additional arguments to parse Returns ---------- argparse object
[ "Run", "sampling", "with", "CLI", "arguments", "." ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/common_args.py#L54-L73
train
222,056
SALib/SALib
src/SALib/sample/morris/strategy.py
Strategy.run_checks
def run_checks(number_samples, k_choices): """Runs checks on `k_choices` """ assert isinstance(k_choices, int), \ "Number of optimal trajectories should be an integer" if k_choices < 2: raise ValueError( "The number of optimal trajectories must be set to 2 or more.") if k_choices >= number_samples: msg = "The number of optimal trajectories should be less than the \ number of samples" raise ValueError(msg)
python
def run_checks(number_samples, k_choices): """Runs checks on `k_choices` """ assert isinstance(k_choices, int), \ "Number of optimal trajectories should be an integer" if k_choices < 2: raise ValueError( "The number of optimal trajectories must be set to 2 or more.") if k_choices >= number_samples: msg = "The number of optimal trajectories should be less than the \ number of samples" raise ValueError(msg)
[ "def", "run_checks", "(", "number_samples", ",", "k_choices", ")", ":", "assert", "isinstance", "(", "k_choices", ",", "int", ")", ",", "\"Number of optimal trajectories should be an integer\"", "if", "k_choices", "<", "2", ":", "raise", "ValueError", "(", "\"The nu...
Runs checks on `k_choices`
[ "Runs", "checks", "on", "k_choices" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/strategy.py#L123-L135
train
222,057
SALib/SALib
src/SALib/sample/morris/strategy.py
Strategy._make_index_list
def _make_index_list(num_samples, num_params, num_groups=None): """Identify indices of input sample associated with each trajectory For each trajectory, identifies the indexes of the input sample which is a function of the number of factors/groups and the number of samples Arguments --------- num_samples : int The number of trajectories num_params : int The number of parameters num_groups : int The number of groups Returns ------- list of numpy.ndarray Example ------- >>> BruteForce()._make_index_list(num_samples=4, num_params=3, num_groups=2) [np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8]), np.array([9, 10, 11])] """ if num_groups is None: num_groups = num_params index_list = [] for j in range(num_samples): index_list.append(np.arange(num_groups + 1) + j * (num_groups + 1)) return index_list
python
def _make_index_list(num_samples, num_params, num_groups=None): """Identify indices of input sample associated with each trajectory For each trajectory, identifies the indexes of the input sample which is a function of the number of factors/groups and the number of samples Arguments --------- num_samples : int The number of trajectories num_params : int The number of parameters num_groups : int The number of groups Returns ------- list of numpy.ndarray Example ------- >>> BruteForce()._make_index_list(num_samples=4, num_params=3, num_groups=2) [np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8]), np.array([9, 10, 11])] """ if num_groups is None: num_groups = num_params index_list = [] for j in range(num_samples): index_list.append(np.arange(num_groups + 1) + j * (num_groups + 1)) return index_list
[ "def", "_make_index_list", "(", "num_samples", ",", "num_params", ",", "num_groups", "=", "None", ")", ":", "if", "num_groups", "is", "None", ":", "num_groups", "=", "num_params", "index_list", "=", "[", "]", "for", "j", "in", "range", "(", "num_samples", ...
Identify indices of input sample associated with each trajectory For each trajectory, identifies the indexes of the input sample which is a function of the number of factors/groups and the number of samples Arguments --------- num_samples : int The number of trajectories num_params : int The number of parameters num_groups : int The number of groups Returns ------- list of numpy.ndarray Example ------- >>> BruteForce()._make_index_list(num_samples=4, num_params=3, num_groups=2) [np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8]), np.array([9, 10, 11])]
[ "Identify", "indices", "of", "input", "sample", "associated", "with", "each", "trajectory" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/strategy.py#L138-L170
train
222,058
SALib/SALib
src/SALib/sample/morris/strategy.py
Strategy.compile_output
def compile_output(self, input_sample, num_samples, num_params, maximum_combo, num_groups=None): """Picks the trajectories from the input Arguments --------- input_sample : numpy.ndarray num_samples : int num_params : int maximum_combo : list num_groups : int """ if num_groups is None: num_groups = num_params self.check_input_sample(input_sample, num_groups, num_samples) index_list = self._make_index_list(num_samples, num_params, num_groups) output = np.zeros( (np.size(maximum_combo) * (num_groups + 1), num_params)) for counter, combo in enumerate(maximum_combo): output[index_list[counter]] = np.array( input_sample[index_list[combo]]) return output
python
def compile_output(self, input_sample, num_samples, num_params, maximum_combo, num_groups=None): """Picks the trajectories from the input Arguments --------- input_sample : numpy.ndarray num_samples : int num_params : int maximum_combo : list num_groups : int """ if num_groups is None: num_groups = num_params self.check_input_sample(input_sample, num_groups, num_samples) index_list = self._make_index_list(num_samples, num_params, num_groups) output = np.zeros( (np.size(maximum_combo) * (num_groups + 1), num_params)) for counter, combo in enumerate(maximum_combo): output[index_list[counter]] = np.array( input_sample[index_list[combo]]) return output
[ "def", "compile_output", "(", "self", ",", "input_sample", ",", "num_samples", ",", "num_params", ",", "maximum_combo", ",", "num_groups", "=", "None", ")", ":", "if", "num_groups", "is", "None", ":", "num_groups", "=", "num_params", "self", ".", "check_input_...
Picks the trajectories from the input Arguments --------- input_sample : numpy.ndarray num_samples : int num_params : int maximum_combo : list num_groups : int
[ "Picks", "the", "trajectories", "from", "the", "input" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/strategy.py#L172-L198
train
222,059
SALib/SALib
src/SALib/sample/morris/strategy.py
Strategy.check_input_sample
def check_input_sample(input_sample, num_params, num_samples): """Check the `input_sample` is valid Checks input sample is: - the correct size - values between 0 and 1 Arguments --------- input_sample : numpy.ndarray num_params : int num_samples : int """ assert type(input_sample) == np.ndarray, \ "Input sample is not an numpy array" assert input_sample.shape[0] == (num_params + 1) * num_samples, \ "Input sample does not match number of parameters or groups" assert np.any((input_sample >= 0) | (input_sample <= 1)), \ "Input sample must be scaled between 0 and 1"
python
def check_input_sample(input_sample, num_params, num_samples): """Check the `input_sample` is valid Checks input sample is: - the correct size - values between 0 and 1 Arguments --------- input_sample : numpy.ndarray num_params : int num_samples : int """ assert type(input_sample) == np.ndarray, \ "Input sample is not an numpy array" assert input_sample.shape[0] == (num_params + 1) * num_samples, \ "Input sample does not match number of parameters or groups" assert np.any((input_sample >= 0) | (input_sample <= 1)), \ "Input sample must be scaled between 0 and 1"
[ "def", "check_input_sample", "(", "input_sample", ",", "num_params", ",", "num_samples", ")", ":", "assert", "type", "(", "input_sample", ")", "==", "np", ".", "ndarray", ",", "\"Input sample is not an numpy array\"", "assert", "input_sample", ".", "shape", "[", "...
Check the `input_sample` is valid Checks input sample is: - the correct size - values between 0 and 1 Arguments --------- input_sample : numpy.ndarray num_params : int num_samples : int
[ "Check", "the", "input_sample", "is", "valid" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/strategy.py#L201-L219
train
222,060
SALib/SALib
src/SALib/sample/morris/strategy.py
Strategy.compute_distance
def compute_distance(m, l): '''Compute distance between two trajectories Returns ------- numpy.ndarray ''' if np.shape(m) != np.shape(l): raise ValueError("Input matrices are different sizes") if np.array_equal(m, l): # print("Trajectory %s and %s are equal" % (m, l)) distance = 0 else: distance = np.array(np.sum(cdist(m, l)), dtype=np.float32) return distance
python
def compute_distance(m, l): '''Compute distance between two trajectories Returns ------- numpy.ndarray ''' if np.shape(m) != np.shape(l): raise ValueError("Input matrices are different sizes") if np.array_equal(m, l): # print("Trajectory %s and %s are equal" % (m, l)) distance = 0 else: distance = np.array(np.sum(cdist(m, l)), dtype=np.float32) return distance
[ "def", "compute_distance", "(", "m", ",", "l", ")", ":", "if", "np", ".", "shape", "(", "m", ")", "!=", "np", ".", "shape", "(", "l", ")", ":", "raise", "ValueError", "(", "\"Input matrices are different sizes\"", ")", "if", "np", ".", "array_equal", "...
Compute distance between two trajectories Returns ------- numpy.ndarray
[ "Compute", "distance", "between", "two", "trajectories" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/strategy.py#L222-L238
train
222,061
SALib/SALib
src/SALib/sample/morris/strategy.py
Strategy.compute_distance_matrix
def compute_distance_matrix(self, input_sample, num_samples, num_params, num_groups=None, local_optimization=False): """Computes the distance between each and every trajectory Each entry in the matrix represents the sum of the geometric distances between all the pairs of points of the two trajectories If the `groups` argument is filled, then the distances are still calculated for each trajectory, Arguments --------- input_sample : numpy.ndarray The input sample of trajectories for which to compute the distance matrix num_samples : int The number of trajectories num_params : int The number of factors num_groups : int, default=None The number of groups local_optimization : bool, default=False If True, fills the lower triangle of the distance matrix Returns ------- distance_matrix : numpy.ndarray """ if num_groups: self.check_input_sample(input_sample, num_groups, num_samples) else: self.check_input_sample(input_sample, num_params, num_samples) index_list = self._make_index_list(num_samples, num_params, num_groups) distance_matrix = np.zeros( (num_samples, num_samples), dtype=np.float32) for j in range(num_samples): input_1 = input_sample[index_list[j]] for k in range(j + 1, num_samples): input_2 = input_sample[index_list[k]] # Fills the lower triangle of the matrix if local_optimization is True: distance_matrix[j, k] = self.compute_distance( input_1, input_2) distance_matrix[k, j] = self.compute_distance(input_1, input_2) return distance_matrix
python
def compute_distance_matrix(self, input_sample, num_samples, num_params, num_groups=None, local_optimization=False): """Computes the distance between each and every trajectory Each entry in the matrix represents the sum of the geometric distances between all the pairs of points of the two trajectories If the `groups` argument is filled, then the distances are still calculated for each trajectory, Arguments --------- input_sample : numpy.ndarray The input sample of trajectories for which to compute the distance matrix num_samples : int The number of trajectories num_params : int The number of factors num_groups : int, default=None The number of groups local_optimization : bool, default=False If True, fills the lower triangle of the distance matrix Returns ------- distance_matrix : numpy.ndarray """ if num_groups: self.check_input_sample(input_sample, num_groups, num_samples) else: self.check_input_sample(input_sample, num_params, num_samples) index_list = self._make_index_list(num_samples, num_params, num_groups) distance_matrix = np.zeros( (num_samples, num_samples), dtype=np.float32) for j in range(num_samples): input_1 = input_sample[index_list[j]] for k in range(j + 1, num_samples): input_2 = input_sample[index_list[k]] # Fills the lower triangle of the matrix if local_optimization is True: distance_matrix[j, k] = self.compute_distance( input_1, input_2) distance_matrix[k, j] = self.compute_distance(input_1, input_2) return distance_matrix
[ "def", "compute_distance_matrix", "(", "self", ",", "input_sample", ",", "num_samples", ",", "num_params", ",", "num_groups", "=", "None", ",", "local_optimization", "=", "False", ")", ":", "if", "num_groups", ":", "self", ".", "check_input_sample", "(", "input_...
Computes the distance between each and every trajectory Each entry in the matrix represents the sum of the geometric distances between all the pairs of points of the two trajectories If the `groups` argument is filled, then the distances are still calculated for each trajectory, Arguments --------- input_sample : numpy.ndarray The input sample of trajectories for which to compute the distance matrix num_samples : int The number of trajectories num_params : int The number of factors num_groups : int, default=None The number of groups local_optimization : bool, default=False If True, fills the lower triangle of the distance matrix Returns ------- distance_matrix : numpy.ndarray
[ "Computes", "the", "distance", "between", "each", "and", "every", "trajectory" ]
9744d73bb17cfcffc8282c7dc4a727efdc4bea3f
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/strategy.py#L240-L290
train
222,062
nicodv/kmodes
kmodes/kmodes.py
move_point_cat
def move_point_cat(point, ipoint, to_clust, from_clust, cl_attr_freq, membship, centroids): """Move point between clusters, categorical attributes.""" membship[to_clust, ipoint] = 1 membship[from_clust, ipoint] = 0 # Update frequencies of attributes in cluster. for iattr, curattr in enumerate(point): to_attr_counts = cl_attr_freq[to_clust][iattr] from_attr_counts = cl_attr_freq[from_clust][iattr] # Increment the attribute count for the new "to" cluster to_attr_counts[curattr] += 1 current_attribute_value_freq = to_attr_counts[curattr] current_centroid_value = centroids[to_clust][iattr] current_centroid_freq = to_attr_counts[current_centroid_value] if current_centroid_freq < current_attribute_value_freq: # We have incremented this value to the new mode. Update the centroid. centroids[to_clust][iattr] = curattr # Decrement the attribute count for the old "from" cluster from_attr_counts[curattr] -= 1 old_centroid_value = centroids[from_clust][iattr] if old_centroid_value == curattr: # We have just removed a count from the old centroid value. We need to # recalculate the centroid as it may no longer be the maximum centroids[from_clust][iattr] = get_max_value_key(from_attr_counts) return cl_attr_freq, membship, centroids
python
def move_point_cat(point, ipoint, to_clust, from_clust, cl_attr_freq, membship, centroids): """Move point between clusters, categorical attributes.""" membship[to_clust, ipoint] = 1 membship[from_clust, ipoint] = 0 # Update frequencies of attributes in cluster. for iattr, curattr in enumerate(point): to_attr_counts = cl_attr_freq[to_clust][iattr] from_attr_counts = cl_attr_freq[from_clust][iattr] # Increment the attribute count for the new "to" cluster to_attr_counts[curattr] += 1 current_attribute_value_freq = to_attr_counts[curattr] current_centroid_value = centroids[to_clust][iattr] current_centroid_freq = to_attr_counts[current_centroid_value] if current_centroid_freq < current_attribute_value_freq: # We have incremented this value to the new mode. Update the centroid. centroids[to_clust][iattr] = curattr # Decrement the attribute count for the old "from" cluster from_attr_counts[curattr] -= 1 old_centroid_value = centroids[from_clust][iattr] if old_centroid_value == curattr: # We have just removed a count from the old centroid value. We need to # recalculate the centroid as it may no longer be the maximum centroids[from_clust][iattr] = get_max_value_key(from_attr_counts) return cl_attr_freq, membship, centroids
[ "def", "move_point_cat", "(", "point", ",", "ipoint", ",", "to_clust", ",", "from_clust", ",", "cl_attr_freq", ",", "membship", ",", "centroids", ")", ":", "membship", "[", "to_clust", ",", "ipoint", "]", "=", "1", "membship", "[", "from_clust", ",", "ipoi...
Move point between clusters, categorical attributes.
[ "Move", "point", "between", "clusters", "categorical", "attributes", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L83-L112
train
222,063
nicodv/kmodes
kmodes/kmodes.py
_labels_cost
def _labels_cost(X, centroids, dissim, membship=None): """Calculate labels and cost function given a matrix of points and a list of centroids for the k-modes algorithm. """ X = check_array(X) n_points = X.shape[0] cost = 0. labels = np.empty(n_points, dtype=np.uint16) for ipoint, curpoint in enumerate(X): diss = dissim(centroids, curpoint, X=X, membship=membship) clust = np.argmin(diss) labels[ipoint] = clust cost += diss[clust] return labels, cost
python
def _labels_cost(X, centroids, dissim, membship=None): """Calculate labels and cost function given a matrix of points and a list of centroids for the k-modes algorithm. """ X = check_array(X) n_points = X.shape[0] cost = 0. labels = np.empty(n_points, dtype=np.uint16) for ipoint, curpoint in enumerate(X): diss = dissim(centroids, curpoint, X=X, membship=membship) clust = np.argmin(diss) labels[ipoint] = clust cost += diss[clust] return labels, cost
[ "def", "_labels_cost", "(", "X", ",", "centroids", ",", "dissim", ",", "membship", "=", "None", ")", ":", "X", "=", "check_array", "(", "X", ")", "n_points", "=", "X", ".", "shape", "[", "0", "]", "cost", "=", "0.", "labels", "=", "np", ".", "emp...
Calculate labels and cost function given a matrix of points and a list of centroids for the k-modes algorithm.
[ "Calculate", "labels", "and", "cost", "function", "given", "a", "matrix", "of", "points", "and", "a", "list", "of", "centroids", "for", "the", "k", "-", "modes", "algorithm", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L115-L131
train
222,064
nicodv/kmodes
kmodes/kmodes.py
_k_modes_iter
def _k_modes_iter(X, centroids, cl_attr_freq, membship, dissim, random_state): """Single iteration of k-modes clustering algorithm""" moves = 0 for ipoint, curpoint in enumerate(X): clust = np.argmin(dissim(centroids, curpoint, X=X, membship=membship)) if membship[clust, ipoint]: # Point is already in its right place. continue # Move point, and update old/new cluster frequencies and centroids. moves += 1 old_clust = np.argwhere(membship[:, ipoint])[0][0] cl_attr_freq, membship, centroids = move_point_cat( curpoint, ipoint, clust, old_clust, cl_attr_freq, membship, centroids ) # In case of an empty cluster, reinitialize with a random point # from the largest cluster. if not membship[old_clust, :].any(): from_clust = membship.sum(axis=1).argmax() choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch] rindx = random_state.choice(choices) cl_attr_freq, membship, centroids = move_point_cat( X[rindx], rindx, old_clust, from_clust, cl_attr_freq, membship, centroids ) return centroids, moves
python
def _k_modes_iter(X, centroids, cl_attr_freq, membship, dissim, random_state): """Single iteration of k-modes clustering algorithm""" moves = 0 for ipoint, curpoint in enumerate(X): clust = np.argmin(dissim(centroids, curpoint, X=X, membship=membship)) if membship[clust, ipoint]: # Point is already in its right place. continue # Move point, and update old/new cluster frequencies and centroids. moves += 1 old_clust = np.argwhere(membship[:, ipoint])[0][0] cl_attr_freq, membship, centroids = move_point_cat( curpoint, ipoint, clust, old_clust, cl_attr_freq, membship, centroids ) # In case of an empty cluster, reinitialize with a random point # from the largest cluster. if not membship[old_clust, :].any(): from_clust = membship.sum(axis=1).argmax() choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch] rindx = random_state.choice(choices) cl_attr_freq, membship, centroids = move_point_cat( X[rindx], rindx, old_clust, from_clust, cl_attr_freq, membship, centroids ) return centroids, moves
[ "def", "_k_modes_iter", "(", "X", ",", "centroids", ",", "cl_attr_freq", ",", "membship", ",", "dissim", ",", "random_state", ")", ":", "moves", "=", "0", "for", "ipoint", ",", "curpoint", "in", "enumerate", "(", "X", ")", ":", "clust", "=", "np", ".",...
Single iteration of k-modes clustering algorithm
[ "Single", "iteration", "of", "k", "-", "modes", "clustering", "algorithm" ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L134-L162
train
222,065
nicodv/kmodes
kmodes/kmodes.py
k_modes
def k_modes(X, n_clusters, max_iter, dissim, init, n_init, verbose, random_state, n_jobs): """k-modes algorithm""" random_state = check_random_state(random_state) if sparse.issparse(X): raise TypeError("k-modes does not support sparse data.") X = check_array(X, dtype=None) # Convert the categorical values in X to integers for speed. # Based on the unique values in X, we can make a mapping to achieve this. X, enc_map = encode_features(X) n_points, n_attrs = X.shape assert n_clusters <= n_points, "Cannot have more clusters ({}) " \ "than data points ({}).".format(n_clusters, n_points) # Are there more n_clusters than unique rows? Then set the unique # rows as initial values and skip iteration. unique = get_unique_rows(X) n_unique = unique.shape[0] if n_unique <= n_clusters: max_iter = 0 n_init = 1 n_clusters = n_unique init = unique results = [] seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) if n_jobs == 1: for init_no in range(n_init): results.append(k_modes_single(X, n_clusters, n_points, n_attrs, max_iter, dissim, init, init_no, verbose, seeds[init_no])) else: results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(k_modes_single)(X, n_clusters, n_points, n_attrs, max_iter, dissim, init, init_no, verbose, seed) for init_no, seed in enumerate(seeds)) all_centroids, all_labels, all_costs, all_n_iters = zip(*results) best = np.argmin(all_costs) if n_init > 1 and verbose: print("Best run was number {}".format(best + 1)) return all_centroids[best], enc_map, all_labels[best], \ all_costs[best], all_n_iters[best]
python
def k_modes(X, n_clusters, max_iter, dissim, init, n_init, verbose, random_state, n_jobs): """k-modes algorithm""" random_state = check_random_state(random_state) if sparse.issparse(X): raise TypeError("k-modes does not support sparse data.") X = check_array(X, dtype=None) # Convert the categorical values in X to integers for speed. # Based on the unique values in X, we can make a mapping to achieve this. X, enc_map = encode_features(X) n_points, n_attrs = X.shape assert n_clusters <= n_points, "Cannot have more clusters ({}) " \ "than data points ({}).".format(n_clusters, n_points) # Are there more n_clusters than unique rows? Then set the unique # rows as initial values and skip iteration. unique = get_unique_rows(X) n_unique = unique.shape[0] if n_unique <= n_clusters: max_iter = 0 n_init = 1 n_clusters = n_unique init = unique results = [] seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) if n_jobs == 1: for init_no in range(n_init): results.append(k_modes_single(X, n_clusters, n_points, n_attrs, max_iter, dissim, init, init_no, verbose, seeds[init_no])) else: results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(k_modes_single)(X, n_clusters, n_points, n_attrs, max_iter, dissim, init, init_no, verbose, seed) for init_no, seed in enumerate(seeds)) all_centroids, all_labels, all_costs, all_n_iters = zip(*results) best = np.argmin(all_costs) if n_init > 1 and verbose: print("Best run was number {}".format(best + 1)) return all_centroids[best], enc_map, all_labels[best], \ all_costs[best], all_n_iters[best]
[ "def", "k_modes", "(", "X", ",", "n_clusters", ",", "max_iter", ",", "dissim", ",", "init", ",", "n_init", ",", "verbose", ",", "random_state", ",", "n_jobs", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "if", "sparse", "....
k-modes algorithm
[ "k", "-", "modes", "algorithm" ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L243-L287
train
222,066
nicodv/kmodes
kmodes/kmodes.py
KModes.fit
def fit(self, X, y=None, **kwargs): """Compute k-modes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features] """ X = pandas_to_numpy(X) random_state = check_random_state(self.random_state) self._enc_cluster_centroids, self._enc_map, self.labels_,\ self.cost_, self.n_iter_ = k_modes(X, self.n_clusters, self.max_iter, self.cat_dissim, self.init, self.n_init, self.verbose, random_state, self.n_jobs) return self
python
def fit(self, X, y=None, **kwargs): """Compute k-modes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features] """ X = pandas_to_numpy(X) random_state = check_random_state(self.random_state) self._enc_cluster_centroids, self._enc_map, self.labels_,\ self.cost_, self.n_iter_ = k_modes(X, self.n_clusters, self.max_iter, self.cat_dissim, self.init, self.n_init, self.verbose, random_state, self.n_jobs) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "X", "=", "pandas_to_numpy", "(", "X", ")", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "self", ".", "_enc_cluster_cent...
Compute k-modes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features]
[ "Compute", "k", "-", "modes", "clustering", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L381-L401
train
222,067
nicodv/kmodes
kmodes/kmodes.py
KModes.fit_predict
def fit_predict(self, X, y=None, **kwargs): """Compute cluster centroids and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). """ return self.fit(X, **kwargs).predict(X, **kwargs)
python
def fit_predict(self, X, y=None, **kwargs): """Compute cluster centroids and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). """ return self.fit(X, **kwargs).predict(X, **kwargs)
[ "def", "fit_predict", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "fit", "(", "X", ",", "*", "*", "kwargs", ")", ".", "predict", "(", "X", ",", "*", "*", "kwargs", ")" ]
Compute cluster centroids and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X).
[ "Compute", "cluster", "centroids", "and", "predict", "cluster", "index", "for", "each", "sample", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L403-L409
train
222,068
nicodv/kmodes
kmodes/util/__init__.py
get_max_value_key
def get_max_value_key(dic): """Gets the key for the maximum value in a dict.""" v = np.array(list(dic.values())) k = np.array(list(dic.keys())) maxima = np.where(v == np.max(v))[0] if len(maxima) == 1: return k[maxima[0]] else: # In order to be consistent, always selects the minimum key # (guaranteed to be unique) when there are multiple maximum values. return k[maxima[np.argmin(k[maxima])]]
python
def get_max_value_key(dic): """Gets the key for the maximum value in a dict.""" v = np.array(list(dic.values())) k = np.array(list(dic.keys())) maxima = np.where(v == np.max(v))[0] if len(maxima) == 1: return k[maxima[0]] else: # In order to be consistent, always selects the minimum key # (guaranteed to be unique) when there are multiple maximum values. return k[maxima[np.argmin(k[maxima])]]
[ "def", "get_max_value_key", "(", "dic", ")", ":", "v", "=", "np", ".", "array", "(", "list", "(", "dic", ".", "values", "(", ")", ")", ")", "k", "=", "np", ".", "array", "(", "list", "(", "dic", ".", "keys", "(", ")", ")", ")", "maxima", "=",...
Gets the key for the maximum value in a dict.
[ "Gets", "the", "key", "for", "the", "maximum", "value", "in", "a", "dict", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/util/__init__.py#L12-L23
train
222,069
nicodv/kmodes
kmodes/util/__init__.py
decode_centroids
def decode_centroids(encoded, mapping): """Decodes the encoded centroids array back to the original data labels using a list of mappings. """ decoded = [] for ii in range(encoded.shape[1]): # Invert the mapping so that we can decode. inv_mapping = {v: k for k, v in mapping[ii].items()} decoded.append(np.vectorize(inv_mapping.__getitem__)(encoded[:, ii])) return np.atleast_2d(np.array(decoded)).T
python
def decode_centroids(encoded, mapping): """Decodes the encoded centroids array back to the original data labels using a list of mappings. """ decoded = [] for ii in range(encoded.shape[1]): # Invert the mapping so that we can decode. inv_mapping = {v: k for k, v in mapping[ii].items()} decoded.append(np.vectorize(inv_mapping.__getitem__)(encoded[:, ii])) return np.atleast_2d(np.array(decoded)).T
[ "def", "decode_centroids", "(", "encoded", ",", "mapping", ")", ":", "decoded", "=", "[", "]", "for", "ii", "in", "range", "(", "encoded", ".", "shape", "[", "1", "]", ")", ":", "# Invert the mapping so that we can decode.", "inv_mapping", "=", "{", "v", "...
Decodes the encoded centroids array back to the original data labels using a list of mappings.
[ "Decodes", "the", "encoded", "centroids", "array", "back", "to", "the", "original", "data", "labels", "using", "a", "list", "of", "mappings", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/util/__init__.py#L54-L63
train
222,070
nicodv/kmodes
kmodes/kprototypes.py
move_point_num
def move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum): """Move point between clusters, numerical attributes.""" # Update sum of attributes in cluster. for iattr, curattr in enumerate(point): cl_attr_sum[to_clust][iattr] += curattr cl_attr_sum[from_clust][iattr] -= curattr # Update sums of memberships in cluster cl_memb_sum[to_clust] += 1 cl_memb_sum[from_clust] -= 1 return cl_attr_sum, cl_memb_sum
python
def move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum): """Move point between clusters, numerical attributes.""" # Update sum of attributes in cluster. for iattr, curattr in enumerate(point): cl_attr_sum[to_clust][iattr] += curattr cl_attr_sum[from_clust][iattr] -= curattr # Update sums of memberships in cluster cl_memb_sum[to_clust] += 1 cl_memb_sum[from_clust] -= 1 return cl_attr_sum, cl_memb_sum
[ "def", "move_point_num", "(", "point", ",", "to_clust", ",", "from_clust", ",", "cl_attr_sum", ",", "cl_memb_sum", ")", ":", "# Update sum of attributes in cluster.", "for", "iattr", ",", "curattr", "in", "enumerate", "(", "point", ")", ":", "cl_attr_sum", "[", ...
Move point between clusters, numerical attributes.
[ "Move", "point", "between", "clusters", "numerical", "attributes", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L28-L37
train
222,071
nicodv/kmodes
kmodes/kprototypes.py
_split_num_cat
def _split_num_cat(X, categorical): """Extract numerical and categorical columns. Convert to numpy arrays, if needed. :param X: Feature matrix :param categorical: Indices of categorical columns """ Xnum = np.asanyarray(X[:, [ii for ii in range(X.shape[1]) if ii not in categorical]]).astype(np.float64) Xcat = np.asanyarray(X[:, categorical]) return Xnum, Xcat
python
def _split_num_cat(X, categorical): """Extract numerical and categorical columns. Convert to numpy arrays, if needed. :param X: Feature matrix :param categorical: Indices of categorical columns """ Xnum = np.asanyarray(X[:, [ii for ii in range(X.shape[1]) if ii not in categorical]]).astype(np.float64) Xcat = np.asanyarray(X[:, categorical]) return Xnum, Xcat
[ "def", "_split_num_cat", "(", "X", ",", "categorical", ")", ":", "Xnum", "=", "np", ".", "asanyarray", "(", "X", "[", ":", ",", "[", "ii", "for", "ii", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", "if", "ii", "not", "in", "categor...
Extract numerical and categorical columns. Convert to numpy arrays, if needed. :param X: Feature matrix :param categorical: Indices of categorical columns
[ "Extract", "numerical", "and", "categorical", "columns", ".", "Convert", "to", "numpy", "arrays", "if", "needed", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L40-L50
train
222,072
nicodv/kmodes
kmodes/kprototypes.py
_labels_cost
def _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma, membship=None): """Calculate labels and cost function given a matrix of points and a list of centroids for the k-prototypes algorithm. """ n_points = Xnum.shape[0] Xnum = check_array(Xnum) cost = 0. labels = np.empty(n_points, dtype=np.uint16) for ipoint in range(n_points): # Numerical cost = sum of Euclidean distances num_costs = num_dissim(centroids[0], Xnum[ipoint]) cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship) # Gamma relates the categorical cost to the numerical cost. tot_costs = num_costs + gamma * cat_costs clust = np.argmin(tot_costs) labels[ipoint] = clust cost += tot_costs[clust] return labels, cost
python
def _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma, membship=None): """Calculate labels and cost function given a matrix of points and a list of centroids for the k-prototypes algorithm. """ n_points = Xnum.shape[0] Xnum = check_array(Xnum) cost = 0. labels = np.empty(n_points, dtype=np.uint16) for ipoint in range(n_points): # Numerical cost = sum of Euclidean distances num_costs = num_dissim(centroids[0], Xnum[ipoint]) cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship) # Gamma relates the categorical cost to the numerical cost. tot_costs = num_costs + gamma * cat_costs clust = np.argmin(tot_costs) labels[ipoint] = clust cost += tot_costs[clust] return labels, cost
[ "def", "_labels_cost", "(", "Xnum", ",", "Xcat", ",", "centroids", ",", "num_dissim", ",", "cat_dissim", ",", "gamma", ",", "membship", "=", "None", ")", ":", "n_points", "=", "Xnum", ".", "shape", "[", "0", "]", "Xnum", "=", "check_array", "(", "Xnum"...
Calculate labels and cost function given a matrix of points and a list of centroids for the k-prototypes algorithm.
[ "Calculate", "labels", "and", "cost", "function", "given", "a", "matrix", "of", "points", "and", "a", "list", "of", "centroids", "for", "the", "k", "-", "prototypes", "algorithm", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L53-L73
train
222,073
nicodv/kmodes
kmodes/kprototypes.py
_k_prototypes_iter
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq, membship, num_dissim, cat_dissim, gamma, random_state): """Single iteration of the k-prototypes algorithm""" moves = 0 for ipoint in range(Xnum.shape[0]): clust = np.argmin( num_dissim(centroids[0], Xnum[ipoint]) + gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship) ) if membship[clust, ipoint]: # Point is already in its right place. continue # Move point, and update old/new cluster frequencies and centroids. moves += 1 old_clust = np.argwhere(membship[:, ipoint])[0][0] # Note that membship gets updated by kmodes.move_point_cat. # move_point_num only updates things specific to the k-means part. cl_attr_sum, cl_memb_sum = move_point_num( Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum ) cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat( Xcat[ipoint], ipoint, clust, old_clust, cl_attr_freq, membship, centroids[1] ) # Update old and new centroids for numerical attributes using # the means and sums of all values for iattr in range(len(Xnum[ipoint])): for curc in (clust, old_clust): if cl_memb_sum[curc]: centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc] else: centroids[0][curc, iattr] = 0. # In case of an empty cluster, reinitialize with a random point # from largest cluster. if not cl_memb_sum[old_clust]: from_clust = membship.sum(axis=1).argmax() choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch] rindx = random_state.choice(choices) cl_attr_sum, cl_memb_sum = move_point_num( Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum ) cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat( Xcat[rindx], rindx, old_clust, from_clust, cl_attr_freq, membship, centroids[1] ) return centroids, moves
python
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq, membship, num_dissim, cat_dissim, gamma, random_state): """Single iteration of the k-prototypes algorithm""" moves = 0 for ipoint in range(Xnum.shape[0]): clust = np.argmin( num_dissim(centroids[0], Xnum[ipoint]) + gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship) ) if membship[clust, ipoint]: # Point is already in its right place. continue # Move point, and update old/new cluster frequencies and centroids. moves += 1 old_clust = np.argwhere(membship[:, ipoint])[0][0] # Note that membship gets updated by kmodes.move_point_cat. # move_point_num only updates things specific to the k-means part. cl_attr_sum, cl_memb_sum = move_point_num( Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum ) cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat( Xcat[ipoint], ipoint, clust, old_clust, cl_attr_freq, membship, centroids[1] ) # Update old and new centroids for numerical attributes using # the means and sums of all values for iattr in range(len(Xnum[ipoint])): for curc in (clust, old_clust): if cl_memb_sum[curc]: centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc] else: centroids[0][curc, iattr] = 0. # In case of an empty cluster, reinitialize with a random point # from largest cluster. if not cl_memb_sum[old_clust]: from_clust = membship.sum(axis=1).argmax() choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch] rindx = random_state.choice(choices) cl_attr_sum, cl_memb_sum = move_point_num( Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum ) cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat( Xcat[rindx], rindx, old_clust, from_clust, cl_attr_freq, membship, centroids[1] ) return centroids, moves
[ "def", "_k_prototypes_iter", "(", "Xnum", ",", "Xcat", ",", "centroids", ",", "cl_attr_sum", ",", "cl_memb_sum", ",", "cl_attr_freq", ",", "membship", ",", "num_dissim", ",", "cat_dissim", ",", "gamma", ",", "random_state", ")", ":", "moves", "=", "0", "for"...
Single iteration of the k-prototypes algorithm
[ "Single", "iteration", "of", "the", "k", "-", "prototypes", "algorithm" ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L76-L127
train
222,074
nicodv/kmodes
kmodes/kprototypes.py
k_prototypes
def k_prototypes(X, categorical, n_clusters, max_iter, num_dissim, cat_dissim, gamma, init, n_init, verbose, random_state, n_jobs): """k-prototypes algorithm""" random_state = check_random_state(random_state) if sparse.issparse(X): raise TypeError("k-prototypes does not support sparse data.") if categorical is None or not categorical: raise NotImplementedError( "No categorical data selected, effectively doing k-means. " "Present a list of categorical columns, or use scikit-learn's " "KMeans instead." ) if isinstance(categorical, int): categorical = [categorical] assert len(categorical) != X.shape[1], \ "All columns are categorical, use k-modes instead of k-prototypes." assert max(categorical) < X.shape[1], \ "Categorical index larger than number of columns." ncatattrs = len(categorical) nnumattrs = X.shape[1] - ncatattrs n_points = X.shape[0] assert n_clusters <= n_points, "Cannot have more clusters ({}) " \ "than data points ({}).".format(n_clusters, n_points) Xnum, Xcat = _split_num_cat(X, categorical) Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None) # Convert the categorical values in Xcat to integers for speed. # Based on the unique values in Xcat, we can make a mapping to achieve this. Xcat, enc_map = encode_features(Xcat) # Are there more n_clusters than unique rows? Then set the unique # rows as initial values and skip iteration. unique = get_unique_rows(X) n_unique = unique.shape[0] if n_unique <= n_clusters: max_iter = 0 n_init = 1 n_clusters = n_unique init = list(_split_num_cat(unique, categorical)) init[1], _ = encode_features(init[1], enc_map) # Estimate a good value for gamma, which determines the weighing of # categorical values in clusters (see Huang [1997]). 
if gamma is None: gamma = 0.5 * Xnum.std() results = [] seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) if n_jobs == 1: for init_no in range(n_init): results.append(k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points, max_iter, num_dissim, cat_dissim, gamma, init, init_no, verbose, seeds[init_no])) else: results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(k_prototypes_single)(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points, max_iter, num_dissim, cat_dissim, gamma, init, init_no, verbose, seed) for init_no, seed in enumerate(seeds)) all_centroids, all_labels, all_costs, all_n_iters = zip(*results) best = np.argmin(all_costs) if n_init > 1 and verbose: print("Best run was number {}".format(best + 1)) # Note: return gamma in case it was automatically determined. return all_centroids[best], enc_map, all_labels[best], \ all_costs[best], all_n_iters[best], gamma
python
def k_prototypes(X, categorical, n_clusters, max_iter, num_dissim, cat_dissim, gamma, init, n_init, verbose, random_state, n_jobs): """k-prototypes algorithm""" random_state = check_random_state(random_state) if sparse.issparse(X): raise TypeError("k-prototypes does not support sparse data.") if categorical is None or not categorical: raise NotImplementedError( "No categorical data selected, effectively doing k-means. " "Present a list of categorical columns, or use scikit-learn's " "KMeans instead." ) if isinstance(categorical, int): categorical = [categorical] assert len(categorical) != X.shape[1], \ "All columns are categorical, use k-modes instead of k-prototypes." assert max(categorical) < X.shape[1], \ "Categorical index larger than number of columns." ncatattrs = len(categorical) nnumattrs = X.shape[1] - ncatattrs n_points = X.shape[0] assert n_clusters <= n_points, "Cannot have more clusters ({}) " \ "than data points ({}).".format(n_clusters, n_points) Xnum, Xcat = _split_num_cat(X, categorical) Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None) # Convert the categorical values in Xcat to integers for speed. # Based on the unique values in Xcat, we can make a mapping to achieve this. Xcat, enc_map = encode_features(Xcat) # Are there more n_clusters than unique rows? Then set the unique # rows as initial values and skip iteration. unique = get_unique_rows(X) n_unique = unique.shape[0] if n_unique <= n_clusters: max_iter = 0 n_init = 1 n_clusters = n_unique init = list(_split_num_cat(unique, categorical)) init[1], _ = encode_features(init[1], enc_map) # Estimate a good value for gamma, which determines the weighing of # categorical values in clusters (see Huang [1997]). 
if gamma is None: gamma = 0.5 * Xnum.std() results = [] seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) if n_jobs == 1: for init_no in range(n_init): results.append(k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points, max_iter, num_dissim, cat_dissim, gamma, init, init_no, verbose, seeds[init_no])) else: results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(k_prototypes_single)(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points, max_iter, num_dissim, cat_dissim, gamma, init, init_no, verbose, seed) for init_no, seed in enumerate(seeds)) all_centroids, all_labels, all_costs, all_n_iters = zip(*results) best = np.argmin(all_costs) if n_init > 1 and verbose: print("Best run was number {}".format(best + 1)) # Note: return gamma in case it was automatically determined. return all_centroids[best], enc_map, all_labels[best], \ all_costs[best], all_n_iters[best], gamma
[ "def", "k_prototypes", "(", "X", ",", "categorical", ",", "n_clusters", ",", "max_iter", ",", "num_dissim", ",", "cat_dissim", ",", "gamma", ",", "init", ",", "n_init", ",", "verbose", ",", "random_state", ",", "n_jobs", ")", ":", "random_state", "=", "che...
k-prototypes algorithm
[ "k", "-", "prototypes", "algorithm" ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L255-L327
train
222,075
nicodv/kmodes
kmodes/kprototypes.py
KPrototypes.fit
def fit(self, X, y=None, categorical=None): """Compute k-prototypes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features] categorical : Index of columns that contain categorical data """ if categorical is not None: assert isinstance(categorical, (int, list, tuple)), "The 'categorical' \ argument needs to be an integer with the index of the categorical \ column in your data, or a list or tuple of several of them, \ but it is a {}.".format(type(categorical)) X = pandas_to_numpy(X) random_state = check_random_state(self.random_state) # If self.gamma is None, gamma will be automatically determined from # the data. The function below returns its value. self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\ self.n_iter_, self.gamma = k_prototypes(X, categorical, self.n_clusters, self.max_iter, self.num_dissim, self.cat_dissim, self.gamma, self.init, self.n_init, self.verbose, random_state, self.n_jobs) return self
python
def fit(self, X, y=None, categorical=None): """Compute k-prototypes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features] categorical : Index of columns that contain categorical data """ if categorical is not None: assert isinstance(categorical, (int, list, tuple)), "The 'categorical' \ argument needs to be an integer with the index of the categorical \ column in your data, or a list or tuple of several of them, \ but it is a {}.".format(type(categorical)) X = pandas_to_numpy(X) random_state = check_random_state(self.random_state) # If self.gamma is None, gamma will be automatically determined from # the data. The function below returns its value. self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\ self.n_iter_, self.gamma = k_prototypes(X, categorical, self.n_clusters, self.max_iter, self.num_dissim, self.cat_dissim, self.gamma, self.init, self.n_init, self.verbose, random_state, self.n_jobs) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "categorical", "=", "None", ")", ":", "if", "categorical", "is", "not", "None", ":", "assert", "isinstance", "(", "categorical", ",", "(", "int", ",", "list", ",", "tuple", ")", ")", ...
Compute k-prototypes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features] categorical : Index of columns that contain categorical data
[ "Compute", "k", "-", "prototypes", "clustering", "." ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L431-L463
train
222,076
nicodv/kmodes
kmodes/util/dissim.py
euclidean_dissim
def euclidean_dissim(a, b, **_): """Euclidean distance dissimilarity function""" if np.isnan(a).any() or np.isnan(b).any(): raise ValueError("Missing values detected in numerical columns.") return np.sum((a - b) ** 2, axis=1)
python
def euclidean_dissim(a, b, **_): """Euclidean distance dissimilarity function""" if np.isnan(a).any() or np.isnan(b).any(): raise ValueError("Missing values detected in numerical columns.") return np.sum((a - b) ** 2, axis=1)
[ "def", "euclidean_dissim", "(", "a", ",", "b", ",", "*", "*", "_", ")", ":", "if", "np", ".", "isnan", "(", "a", ")", ".", "any", "(", ")", "or", "np", ".", "isnan", "(", "b", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"M...
Euclidean distance dissimilarity function
[ "Euclidean", "distance", "dissimilarity", "function" ]
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/util/dissim.py#L13-L17
train
222,077
nicodv/kmodes
kmodes/util/dissim.py
ng_dissim
def ng_dissim(a, b, X=None, membship=None): """Ng et al.'s dissimilarity measure, as presented in Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3, January, 2007 This function can potentially speed up training convergence. Note that membship must be a rectangular array such that the len(membship) = len(a) and len(membship[i]) = X.shape[1] In case of missing membship, this function reverts back to matching dissimilarity (e.g., when predicting). """ # Without membership, revert to matching dissimilarity if membship is None: return matching_dissim(a, b) def calc_cjr(b, X, memj, idr): """Num objects w/ category value x_{i,r} for rth attr in jth cluster""" xcids = np.where(memj == 1) return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0)) def calc_dissim(b, X, memj, idr): # Size of jth cluster cj = float(np.sum(memj)) return (1.0 - (calc_cjr(b, X, memj, idr) / cj)) if cj != 0.0 else 0.0 if len(membship) != a.shape[0] and len(membship[0]) != X.shape[1]: raise ValueError("'membship' must be a rectangular array where " "the number of rows in 'membship' equals the " "number of rows in 'a' and the number of " "columns in 'membship' equals the number of rows in 'X'.") return np.array([np.array([calc_dissim(b, X, membship[idj], idr) if b[idr] == t else 1.0 for idr, t in enumerate(val_a)]).sum(0) for idj, val_a in enumerate(a)])
python
def ng_dissim(a, b, X=None, membship=None): """Ng et al.'s dissimilarity measure, as presented in Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3, January, 2007 This function can potentially speed up training convergence. Note that membship must be a rectangular array such that the len(membship) = len(a) and len(membship[i]) = X.shape[1] In case of missing membship, this function reverts back to matching dissimilarity (e.g., when predicting). """ # Without membership, revert to matching dissimilarity if membship is None: return matching_dissim(a, b) def calc_cjr(b, X, memj, idr): """Num objects w/ category value x_{i,r} for rth attr in jth cluster""" xcids = np.where(memj == 1) return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0)) def calc_dissim(b, X, memj, idr): # Size of jth cluster cj = float(np.sum(memj)) return (1.0 - (calc_cjr(b, X, memj, idr) / cj)) if cj != 0.0 else 0.0 if len(membship) != a.shape[0] and len(membship[0]) != X.shape[1]: raise ValueError("'membship' must be a rectangular array where " "the number of rows in 'membship' equals the " "number of rows in 'a' and the number of " "columns in 'membship' equals the number of rows in 'X'.") return np.array([np.array([calc_dissim(b, X, membship[idj], idr) if b[idr] == t else 1.0 for idr, t in enumerate(val_a)]).sum(0) for idj, val_a in enumerate(a)])
[ "def", "ng_dissim", "(", "a", ",", "b", ",", "X", "=", "None", ",", "membship", "=", "None", ")", ":", "# Without membership, revert to matching dissimilarity", "if", "membship", "is", "None", ":", "return", "matching_dissim", "(", "a", ",", "b", ")", "def",...
Ng et al.'s dissimilarity measure, as presented in Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3, January, 2007 This function can potentially speed up training convergence. Note that membship must be a rectangular array such that the len(membship) = len(a) and len(membship[i]) = X.shape[1] In case of missing membship, this function reverts back to matching dissimilarity (e.g., when predicting).
[ "Ng", "et", "al", ".", "s", "dissimilarity", "measure", "as", "presented", "in", "Michael", "K", ".", "Ng", "Mark", "Junjie", "Li", "Joshua", "Zhexue", "Huang", "and", "Zengyou", "He", "On", "the", "Impact", "of", "Dissimilarity", "Measure", "in", "k", "...
cdb19fe5448aba1bf501626694bb52e68eafab45
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/util/dissim.py#L20-L58
train
222,078
Bogdanp/dramatiq
dramatiq/results/backend.py
ResultBackend.store_result
def store_result(self, message, result: Result, ttl: int) -> None: """Store a result in the backend. Parameters: message(Message) result(object): Must be serializable. ttl(int): The maximum amount of time the result may be stored in the backend for. """ message_key = self.build_message_key(message) return self._store(message_key, result, ttl)
python
def store_result(self, message, result: Result, ttl: int) -> None: """Store a result in the backend. Parameters: message(Message) result(object): Must be serializable. ttl(int): The maximum amount of time the result may be stored in the backend for. """ message_key = self.build_message_key(message) return self._store(message_key, result, ttl)
[ "def", "store_result", "(", "self", ",", "message", ",", "result", ":", "Result", ",", "ttl", ":", "int", ")", "->", "None", ":", "message_key", "=", "self", ".", "build_message_key", "(", "message", ")", "return", "self", ".", "_store", "(", "message_ke...
Store a result in the backend. Parameters: message(Message) result(object): Must be serializable. ttl(int): The maximum amount of time the result may be stored in the backend for.
[ "Store", "a", "result", "in", "the", "backend", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/results/backend.py#L98-L108
train
222,079
Bogdanp/dramatiq
dramatiq/results/backend.py
ResultBackend.build_message_key
def build_message_key(self, message) -> str: """Given a message, return its globally-unique key. Parameters: message(Message) Returns: str """ message_key = "%(namespace)s:%(queue_name)s:%(actor_name)s:%(message_id)s" % { "namespace": self.namespace, "queue_name": q_name(message.queue_name), "actor_name": message.actor_name, "message_id": message.message_id, } return hashlib.md5(message_key.encode("utf-8")).hexdigest()
python
def build_message_key(self, message) -> str: """Given a message, return its globally-unique key. Parameters: message(Message) Returns: str """ message_key = "%(namespace)s:%(queue_name)s:%(actor_name)s:%(message_id)s" % { "namespace": self.namespace, "queue_name": q_name(message.queue_name), "actor_name": message.actor_name, "message_id": message.message_id, } return hashlib.md5(message_key.encode("utf-8")).hexdigest()
[ "def", "build_message_key", "(", "self", ",", "message", ")", "->", "str", ":", "message_key", "=", "\"%(namespace)s:%(queue_name)s:%(actor_name)s:%(message_id)s\"", "%", "{", "\"namespace\"", ":", "self", ".", "namespace", ",", "\"queue_name\"", ":", "q_name", "(", ...
Given a message, return its globally-unique key. Parameters: message(Message) Returns: str
[ "Given", "a", "message", "return", "its", "globally", "-", "unique", "key", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/results/backend.py#L110-L125
train
222,080
Bogdanp/dramatiq
dramatiq/results/backend.py
ResultBackend._store
def _store(self, message_key: str, result: Result, ttl: int) -> None: # pragma: no cover """Store a result in the backend. Subclasses may implement this method if they want to use the default implementation of set_result. """ raise NotImplementedError("%(classname)r does not implement _store()" % { "classname": type(self).__name__, })
python
def _store(self, message_key: str, result: Result, ttl: int) -> None: # pragma: no cover """Store a result in the backend. Subclasses may implement this method if they want to use the default implementation of set_result. """ raise NotImplementedError("%(classname)r does not implement _store()" % { "classname": type(self).__name__, })
[ "def", "_store", "(", "self", ",", "message_key", ":", "str", ",", "result", ":", "Result", ",", "ttl", ":", "int", ")", "->", "None", ":", "# pragma: no cover", "raise", "NotImplementedError", "(", "\"%(classname)r does not implement _store()\"", "%", "{", "\"c...
Store a result in the backend. Subclasses may implement this method if they want to use the default implementation of set_result.
[ "Store", "a", "result", "in", "the", "backend", ".", "Subclasses", "may", "implement", "this", "method", "if", "they", "want", "to", "use", "the", "default", "implementation", "of", "set_result", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/results/backend.py#L136-L143
train
222,081
Bogdanp/dramatiq
dramatiq/rate_limits/rate_limiter.py
RateLimiter.acquire
def acquire(self, *, raise_on_failure=True): """Attempt to acquire a slot under this rate limiter. Parameters: raise_on_failure(bool): Whether or not failures should raise an exception. If this is false, the context manager will instead return a boolean value representing whether or not the rate limit slot was acquired. Returns: bool: Whether or not the slot could be acquired. """ acquired = False try: acquired = self._acquire() if raise_on_failure and not acquired: raise RateLimitExceeded("rate limit exceeded for key %(key)r" % vars(self)) yield acquired finally: if acquired: self._release()
python
def acquire(self, *, raise_on_failure=True): """Attempt to acquire a slot under this rate limiter. Parameters: raise_on_failure(bool): Whether or not failures should raise an exception. If this is false, the context manager will instead return a boolean value representing whether or not the rate limit slot was acquired. Returns: bool: Whether or not the slot could be acquired. """ acquired = False try: acquired = self._acquire() if raise_on_failure and not acquired: raise RateLimitExceeded("rate limit exceeded for key %(key)r" % vars(self)) yield acquired finally: if acquired: self._release()
[ "def", "acquire", "(", "self", ",", "*", ",", "raise_on_failure", "=", "True", ")", ":", "acquired", "=", "False", "try", ":", "acquired", "=", "self", ".", "_acquire", "(", ")", "if", "raise_on_failure", "and", "not", "acquired", ":", "raise", "RateLimi...
Attempt to acquire a slot under this rate limiter. Parameters: raise_on_failure(bool): Whether or not failures should raise an exception. If this is false, the context manager will instead return a boolean value representing whether or not the rate limit slot was acquired. Returns: bool: Whether or not the slot could be acquired.
[ "Attempt", "to", "acquire", "a", "slot", "under", "this", "rate", "limiter", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/rate_limits/rate_limiter.py#L56-L78
train
222,082
Bogdanp/dramatiq
dramatiq/middleware/prometheus.py
flock
def flock(path): """Attempt to acquire a POSIX file lock. """ with open(path, "w+") as lf: try: fcntl.flock(lf, fcntl.LOCK_EX | fcntl.LOCK_NB) acquired = True yield acquired except OSError: acquired = False yield acquired finally: if acquired: fcntl.flock(lf, fcntl.LOCK_UN)
python
def flock(path): """Attempt to acquire a POSIX file lock. """ with open(path, "w+") as lf: try: fcntl.flock(lf, fcntl.LOCK_EX | fcntl.LOCK_NB) acquired = True yield acquired except OSError: acquired = False yield acquired finally: if acquired: fcntl.flock(lf, fcntl.LOCK_UN)
[ "def", "flock", "(", "path", ")", ":", "with", "open", "(", "path", ",", "\"w+\"", ")", "as", "lf", ":", "try", ":", "fcntl", ".", "flock", "(", "lf", ",", "fcntl", ".", "LOCK_EX", "|", "fcntl", ".", "LOCK_NB", ")", "acquired", "=", "True", "yiel...
Attempt to acquire a POSIX file lock.
[ "Attempt", "to", "acquire", "a", "POSIX", "file", "lock", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/middleware/prometheus.py#L227-L242
train
222,083
Bogdanp/dramatiq
dramatiq/message.py
Message.copy
def copy(self, **attributes): """Create a copy of this message. """ updated_options = attributes.pop("options", {}) options = self.options.copy() options.update(updated_options) return self._replace(**attributes, options=options)
python
def copy(self, **attributes): """Create a copy of this message. """ updated_options = attributes.pop("options", {}) options = self.options.copy() options.update(updated_options) return self._replace(**attributes, options=options)
[ "def", "copy", "(", "self", ",", "*", "*", "attributes", ")", ":", "updated_options", "=", "attributes", ".", "pop", "(", "\"options\"", ",", "{", "}", ")", "options", "=", "self", ".", "options", ".", "copy", "(", ")", "options", ".", "update", "(",...
Create a copy of this message.
[ "Create", "a", "copy", "of", "this", "message", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/message.py#L103-L109
train
222,084
Bogdanp/dramatiq
dramatiq/message.py
Message.get_result
def get_result(self, *, backend=None, block=False, timeout=None): """Get the result associated with this message from a result backend. Warning: If you use multiple result backends or brokers you should always pass the backend parameter. This method is only able to infer the result backend off of the default broker. Parameters: backend(ResultBackend): The result backend to use to get the result. If omitted, this method will try to find and use the result backend on the default broker instance. block(bool): Whether or not to block while waiting for a result. timeout(int): The maximum amount of time, in ms, to block while waiting for a result. Raises: RuntimeError: If there is no result backend on the default broker. ResultMissing: When block is False and the result isn't set. ResultTimeout: When waiting for a result times out. Returns: object: The result. """ if not backend: broker = get_broker() for middleware in broker.middleware: if isinstance(middleware, Results): backend = middleware.backend break else: raise RuntimeError("The default broker doesn't have a results backend.") return backend.get_result(self, block=block, timeout=timeout)
python
def get_result(self, *, backend=None, block=False, timeout=None): """Get the result associated with this message from a result backend. Warning: If you use multiple result backends or brokers you should always pass the backend parameter. This method is only able to infer the result backend off of the default broker. Parameters: backend(ResultBackend): The result backend to use to get the result. If omitted, this method will try to find and use the result backend on the default broker instance. block(bool): Whether or not to block while waiting for a result. timeout(int): The maximum amount of time, in ms, to block while waiting for a result. Raises: RuntimeError: If there is no result backend on the default broker. ResultMissing: When block is False and the result isn't set. ResultTimeout: When waiting for a result times out. Returns: object: The result. """ if not backend: broker = get_broker() for middleware in broker.middleware: if isinstance(middleware, Results): backend = middleware.backend break else: raise RuntimeError("The default broker doesn't have a results backend.") return backend.get_result(self, block=block, timeout=timeout)
[ "def", "get_result", "(", "self", ",", "*", ",", "backend", "=", "None", ",", "block", "=", "False", ",", "timeout", "=", "None", ")", ":", "if", "not", "backend", ":", "broker", "=", "get_broker", "(", ")", "for", "middleware", "in", "broker", ".", ...
Get the result associated with this message from a result backend. Warning: If you use multiple result backends or brokers you should always pass the backend parameter. This method is only able to infer the result backend off of the default broker. Parameters: backend(ResultBackend): The result backend to use to get the result. If omitted, this method will try to find and use the result backend on the default broker instance. block(bool): Whether or not to block while waiting for a result. timeout(int): The maximum amount of time, in ms, to block while waiting for a result. Raises: RuntimeError: If there is no result backend on the default broker. ResultMissing: When block is False and the result isn't set. ResultTimeout: When waiting for a result times out. Returns: object: The result.
[ "Get", "the", "result", "associated", "with", "this", "message", "from", "a", "result", "backend", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/message.py#L111-L147
train
222,085
Bogdanp/dramatiq
dramatiq/common.py
compute_backoff
def compute_backoff(attempts, *, factor=5, jitter=True, max_backoff=2000, max_exponent=32): """Compute an exponential backoff value based on some number of attempts. Parameters: attempts(int): The number of attempts there have been so far. factor(int): The number of milliseconds to multiply each backoff by. max_backoff(int): The max number of milliseconds to backoff by. max_exponent(int): The maximum backoff exponent. Returns: tuple: The new number of attempts and the backoff in milliseconds. """ exponent = min(attempts, max_exponent) backoff = min(factor * 2 ** exponent, max_backoff) if jitter: backoff /= 2 backoff = int(backoff + uniform(0, backoff)) return attempts + 1, backoff
python
def compute_backoff(attempts, *, factor=5, jitter=True, max_backoff=2000, max_exponent=32): """Compute an exponential backoff value based on some number of attempts. Parameters: attempts(int): The number of attempts there have been so far. factor(int): The number of milliseconds to multiply each backoff by. max_backoff(int): The max number of milliseconds to backoff by. max_exponent(int): The maximum backoff exponent. Returns: tuple: The new number of attempts and the backoff in milliseconds. """ exponent = min(attempts, max_exponent) backoff = min(factor * 2 ** exponent, max_backoff) if jitter: backoff /= 2 backoff = int(backoff + uniform(0, backoff)) return attempts + 1, backoff
[ "def", "compute_backoff", "(", "attempts", ",", "*", ",", "factor", "=", "5", ",", "jitter", "=", "True", ",", "max_backoff", "=", "2000", ",", "max_exponent", "=", "32", ")", ":", "exponent", "=", "min", "(", "attempts", ",", "max_exponent", ")", "bac...
Compute an exponential backoff value based on some number of attempts. Parameters: attempts(int): The number of attempts there have been so far. factor(int): The number of milliseconds to multiply each backoff by. max_backoff(int): The max number of milliseconds to backoff by. max_exponent(int): The maximum backoff exponent. Returns: tuple: The new number of attempts and the backoff in milliseconds.
[ "Compute", "an", "exponential", "backoff", "value", "based", "on", "some", "number", "of", "attempts", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/common.py#L24-L41
train
222,086
Bogdanp/dramatiq
dramatiq/common.py
join_all
def join_all(joinables, timeout): """Wait on a list of objects that can be joined with a total timeout represented by ``timeout``. Parameters: joinables(object): Objects with a join method. timeout(int): The total timeout in milliseconds. """ started, elapsed = current_millis(), 0 for ob in joinables: ob.join(timeout=timeout / 1000) elapsed = current_millis() - started timeout = max(0, timeout - elapsed)
python
def join_all(joinables, timeout): """Wait on a list of objects that can be joined with a total timeout represented by ``timeout``. Parameters: joinables(object): Objects with a join method. timeout(int): The total timeout in milliseconds. """ started, elapsed = current_millis(), 0 for ob in joinables: ob.join(timeout=timeout / 1000) elapsed = current_millis() - started timeout = max(0, timeout - elapsed)
[ "def", "join_all", "(", "joinables", ",", "timeout", ")", ":", "started", ",", "elapsed", "=", "current_millis", "(", ")", ",", "0", "for", "ob", "in", "joinables", ":", "ob", ".", "join", "(", "timeout", "=", "timeout", "/", "1000", ")", "elapsed", ...
Wait on a list of objects that can be joined with a total timeout represented by ``timeout``. Parameters: joinables(object): Objects with a join method. timeout(int): The total timeout in milliseconds.
[ "Wait", "on", "a", "list", "of", "objects", "that", "can", "be", "joined", "with", "a", "total", "timeout", "represented", "by", "timeout", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/common.py#L86-L98
train
222,087
Bogdanp/dramatiq
dramatiq/common.py
dq_name
def dq_name(queue_name): """Returns the delayed queue name for a given queue. If the given queue name already belongs to a delayed queue, then it is returned unchanged. """ if queue_name.endswith(".DQ"): return queue_name if queue_name.endswith(".XQ"): queue_name = queue_name[:-3] return queue_name + ".DQ"
python
def dq_name(queue_name): """Returns the delayed queue name for a given queue. If the given queue name already belongs to a delayed queue, then it is returned unchanged. """ if queue_name.endswith(".DQ"): return queue_name if queue_name.endswith(".XQ"): queue_name = queue_name[:-3] return queue_name + ".DQ"
[ "def", "dq_name", "(", "queue_name", ")", ":", "if", "queue_name", ".", "endswith", "(", "\".DQ\"", ")", ":", "return", "queue_name", "if", "queue_name", ".", "endswith", "(", "\".XQ\"", ")", ":", "queue_name", "=", "queue_name", "[", ":", "-", "3", "]",...
Returns the delayed queue name for a given queue. If the given queue name already belongs to a delayed queue, then it is returned unchanged.
[ "Returns", "the", "delayed", "queue", "name", "for", "a", "given", "queue", ".", "If", "the", "given", "queue", "name", "already", "belongs", "to", "a", "delayed", "queue", "then", "it", "is", "returned", "unchanged", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/common.py#L109-L119
train
222,088
Bogdanp/dramatiq
dramatiq/common.py
xq_name
def xq_name(queue_name): """Returns the dead letter queue name for a given queue. If the given queue name belongs to a delayed queue, the dead letter queue name for the original queue is generated. """ if queue_name.endswith(".XQ"): return queue_name if queue_name.endswith(".DQ"): queue_name = queue_name[:-3] return queue_name + ".XQ"
python
def xq_name(queue_name): """Returns the dead letter queue name for a given queue. If the given queue name belongs to a delayed queue, the dead letter queue name for the original queue is generated. """ if queue_name.endswith(".XQ"): return queue_name if queue_name.endswith(".DQ"): queue_name = queue_name[:-3] return queue_name + ".XQ"
[ "def", "xq_name", "(", "queue_name", ")", ":", "if", "queue_name", ".", "endswith", "(", "\".XQ\"", ")", ":", "return", "queue_name", "if", "queue_name", ".", "endswith", "(", "\".DQ\"", ")", ":", "queue_name", "=", "queue_name", "[", ":", "-", "3", "]",...
Returns the dead letter queue name for a given queue. If the given queue name belongs to a delayed queue, the dead letter queue name for the original queue is generated.
[ "Returns", "the", "dead", "letter", "queue", "name", "for", "a", "given", "queue", ".", "If", "the", "given", "queue", "name", "belongs", "to", "a", "delayed", "queue", "the", "dead", "letter", "queue", "name", "for", "the", "original", "queue", "is", "g...
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/common.py#L122-L132
train
222,089
Bogdanp/dramatiq
dramatiq/broker.py
get_broker
def get_broker() -> "Broker": """Get the global broker instance. If no global broker is set, this initializes a RabbitmqBroker and returns it. Returns: Broker: The default Broker. """ global global_broker if global_broker is None: from .brokers.rabbitmq import RabbitmqBroker set_broker(RabbitmqBroker( host="127.0.0.1", port=5672, heartbeat=5, connection_attempts=5, blocked_connection_timeout=30, )) return global_broker
python
def get_broker() -> "Broker": """Get the global broker instance. If no global broker is set, this initializes a RabbitmqBroker and returns it. Returns: Broker: The default Broker. """ global global_broker if global_broker is None: from .brokers.rabbitmq import RabbitmqBroker set_broker(RabbitmqBroker( host="127.0.0.1", port=5672, heartbeat=5, connection_attempts=5, blocked_connection_timeout=30, )) return global_broker
[ "def", "get_broker", "(", ")", "->", "\"Broker\"", ":", "global", "global_broker", "if", "global_broker", "is", "None", ":", "from", ".", "brokers", ".", "rabbitmq", "import", "RabbitmqBroker", "set_broker", "(", "RabbitmqBroker", "(", "host", "=", "\"127.0.0.1\...
Get the global broker instance. If no global broker is set, this initializes a RabbitmqBroker and returns it. Returns: Broker: The default Broker.
[ "Get", "the", "global", "broker", "instance", ".", "If", "no", "global", "broker", "is", "set", "this", "initializes", "a", "RabbitmqBroker", "and", "returns", "it", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/broker.py#L26-L44
train
222,090
Bogdanp/dramatiq
dramatiq/broker.py
Broker.add_middleware
def add_middleware(self, middleware, *, before=None, after=None): """Add a middleware object to this broker. The middleware is appended to the end of the middleware list by default. You can specify another middleware (by class) as a reference point for where the new middleware should be added. Parameters: middleware(Middleware): The middleware. before(type): Add this middleware before a specific one. after(type): Add this middleware after a specific one. Raises: ValueError: When either ``before`` or ``after`` refer to a middleware that hasn't been registered yet. """ assert not (before and after), \ "provide either 'before' or 'after', but not both" if before or after: for i, m in enumerate(self.middleware): # noqa if isinstance(m, before or after): break else: raise ValueError("Middleware %r not found" % (before or after)) if before: self.middleware.insert(i, middleware) else: self.middleware.insert(i + 1, middleware) else: self.middleware.append(middleware) self.actor_options |= middleware.actor_options for actor_name in self.get_declared_actors(): middleware.after_declare_actor(self, actor_name) for queue_name in self.get_declared_queues(): middleware.after_declare_queue(self, queue_name) for queue_name in self.get_declared_delay_queues(): middleware.after_declare_delay_queue(self, queue_name)
python
def add_middleware(self, middleware, *, before=None, after=None): """Add a middleware object to this broker. The middleware is appended to the end of the middleware list by default. You can specify another middleware (by class) as a reference point for where the new middleware should be added. Parameters: middleware(Middleware): The middleware. before(type): Add this middleware before a specific one. after(type): Add this middleware after a specific one. Raises: ValueError: When either ``before`` or ``after`` refer to a middleware that hasn't been registered yet. """ assert not (before and after), \ "provide either 'before' or 'after', but not both" if before or after: for i, m in enumerate(self.middleware): # noqa if isinstance(m, before or after): break else: raise ValueError("Middleware %r not found" % (before or after)) if before: self.middleware.insert(i, middleware) else: self.middleware.insert(i + 1, middleware) else: self.middleware.append(middleware) self.actor_options |= middleware.actor_options for actor_name in self.get_declared_actors(): middleware.after_declare_actor(self, actor_name) for queue_name in self.get_declared_queues(): middleware.after_declare_queue(self, queue_name) for queue_name in self.get_declared_delay_queues(): middleware.after_declare_delay_queue(self, queue_name)
[ "def", "add_middleware", "(", "self", ",", "middleware", ",", "*", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "assert", "not", "(", "before", "and", "after", ")", ",", "\"provide either 'before' or 'after', but not both\"", "if", "before"...
Add a middleware object to this broker. The middleware is appended to the end of the middleware list by default. You can specify another middleware (by class) as a reference point for where the new middleware should be added. Parameters: middleware(Middleware): The middleware. before(type): Add this middleware before a specific one. after(type): Add this middleware after a specific one. Raises: ValueError: When either ``before`` or ``after`` refer to a middleware that hasn't been registered yet.
[ "Add", "a", "middleware", "object", "to", "this", "broker", ".", "The", "middleware", "is", "appended", "to", "the", "end", "of", "the", "middleware", "list", "by", "default", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/broker.py#L102-L144
train
222,091
Bogdanp/dramatiq
dramatiq/broker.py
Broker.declare_actor
def declare_actor(self, actor): # pragma: no cover """Declare a new actor on this broker. Declaring an Actor twice replaces the first actor with the second by name. Parameters: actor(Actor): The actor being declared. """ self.emit_before("declare_actor", actor) self.declare_queue(actor.queue_name) self.actors[actor.actor_name] = actor self.emit_after("declare_actor", actor)
python
def declare_actor(self, actor): # pragma: no cover """Declare a new actor on this broker. Declaring an Actor twice replaces the first actor with the second by name. Parameters: actor(Actor): The actor being declared. """ self.emit_before("declare_actor", actor) self.declare_queue(actor.queue_name) self.actors[actor.actor_name] = actor self.emit_after("declare_actor", actor)
[ "def", "declare_actor", "(", "self", ",", "actor", ")", ":", "# pragma: no cover", "self", ".", "emit_before", "(", "\"declare_actor\"", ",", "actor", ")", "self", ".", "declare_queue", "(", "actor", ".", "queue_name", ")", "self", ".", "actors", "[", "actor...
Declare a new actor on this broker. Declaring an Actor twice replaces the first actor with the second by name. Parameters: actor(Actor): The actor being declared.
[ "Declare", "a", "new", "actor", "on", "this", "broker", ".", "Declaring", "an", "Actor", "twice", "replaces", "the", "first", "actor", "with", "the", "second", "by", "name", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/broker.py#L166-L176
train
222,092
Bogdanp/dramatiq
dramatiq/brokers/rabbitmq.py
URLRabbitmqBroker
def URLRabbitmqBroker(url, *, middleware=None): """Alias for the RabbitMQ broker that takes a connection URL as a positional argument. Parameters: url(str): A connection string. middleware(list[Middleware]): The middleware to add to this broker. """ warnings.warn( "Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker.", DeprecationWarning, stacklevel=2, ) return RabbitmqBroker(url=url, middleware=middleware)
python
def URLRabbitmqBroker(url, *, middleware=None): """Alias for the RabbitMQ broker that takes a connection URL as a positional argument. Parameters: url(str): A connection string. middleware(list[Middleware]): The middleware to add to this broker. """ warnings.warn( "Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker.", DeprecationWarning, stacklevel=2, ) return RabbitmqBroker(url=url, middleware=middleware)
[ "def", "URLRabbitmqBroker", "(", "url", ",", "*", ",", "middleware", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker.\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ",", ")",...
Alias for the RabbitMQ broker that takes a connection URL as a positional argument. Parameters: url(str): A connection string. middleware(list[Middleware]): The middleware to add to this broker.
[ "Alias", "for", "the", "RabbitMQ", "broker", "that", "takes", "a", "connection", "URL", "as", "a", "positional", "argument", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L387-L400
train
222,093
Bogdanp/dramatiq
dramatiq/brokers/rabbitmq.py
RabbitmqBroker.close
def close(self): """Close all open RabbitMQ connections. """ # The main thread may keep connections open for a long time # w/o publishing heartbeats, which means that they'll end up # being closed by the time the broker is closed. When that # happens, pika logs a bunch of scary stuff so we want to # filter that out. logging_filter = _IgnoreScaryLogs() logging.getLogger("pika.adapters.base_connection").addFilter(logging_filter) logging.getLogger("pika.adapters.blocking_connection").addFilter(logging_filter) self.logger.debug("Closing channels and connections...") for channel_or_conn in chain(self.channels, self.connections): try: channel_or_conn.close() except pika.exceptions.AMQPError: pass except Exception: # pragma: no cover self.logger.debug("Encountered an error while closing %r.", channel_or_conn, exc_info=True) self.logger.debug("Channels and connections closed.")
python
def close(self): """Close all open RabbitMQ connections. """ # The main thread may keep connections open for a long time # w/o publishing heartbeats, which means that they'll end up # being closed by the time the broker is closed. When that # happens, pika logs a bunch of scary stuff so we want to # filter that out. logging_filter = _IgnoreScaryLogs() logging.getLogger("pika.adapters.base_connection").addFilter(logging_filter) logging.getLogger("pika.adapters.blocking_connection").addFilter(logging_filter) self.logger.debug("Closing channels and connections...") for channel_or_conn in chain(self.channels, self.connections): try: channel_or_conn.close() except pika.exceptions.AMQPError: pass except Exception: # pragma: no cover self.logger.debug("Encountered an error while closing %r.", channel_or_conn, exc_info=True) self.logger.debug("Channels and connections closed.")
[ "def", "close", "(", "self", ")", ":", "# The main thread may keep connections open for a long time", "# w/o publishing heartbeats, which means that they'll end up", "# being closed by the time the broker is closed. When that", "# happens, pika logs a bunch of scary stuff so we want to", "# fil...
Close all open RabbitMQ connections.
[ "Close", "all", "open", "RabbitMQ", "connections", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L147-L168
train
222,094
Bogdanp/dramatiq
dramatiq/brokers/rabbitmq.py
RabbitmqBroker.declare_queue
def declare_queue(self, queue_name): """Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed. """ attempts = 1 while True: try: if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self._declare_queue(queue_name) self.queues.add(queue_name) self.emit_after("declare_queue", queue_name) delayed_name = dq_name(queue_name) self._declare_dq_queue(queue_name) self.delay_queues.add(delayed_name) self.emit_after("declare_delay_queue", delayed_name) self._declare_xq_queue(queue_name) break except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError) as e: # pragma: no cover # Delete the channel and the connection so that the next # caller may initiate new ones of each. del self.channel del self.connection attempts += 1 if attempts > MAX_DECLARE_ATTEMPTS: raise ConnectionClosed(e) from None self.logger.debug( "Retrying declare due to closed connection. [%d/%d]", attempts, MAX_DECLARE_ATTEMPTS, )
python
def declare_queue(self, queue_name): """Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed. """ attempts = 1 while True: try: if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self._declare_queue(queue_name) self.queues.add(queue_name) self.emit_after("declare_queue", queue_name) delayed_name = dq_name(queue_name) self._declare_dq_queue(queue_name) self.delay_queues.add(delayed_name) self.emit_after("declare_delay_queue", delayed_name) self._declare_xq_queue(queue_name) break except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError) as e: # pragma: no cover # Delete the channel and the connection so that the next # caller may initiate new ones of each. del self.channel del self.connection attempts += 1 if attempts > MAX_DECLARE_ATTEMPTS: raise ConnectionClosed(e) from None self.logger.debug( "Retrying declare due to closed connection. [%d/%d]", attempts, MAX_DECLARE_ATTEMPTS, )
[ "def", "declare_queue", "(", "self", ",", "queue_name", ")", ":", "attempts", "=", "1", "while", "True", ":", "try", ":", "if", "queue_name", "not", "in", "self", ".", "queues", ":", "self", ".", "emit_before", "(", "\"declare_queue\"", ",", "queue_name", ...
Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed.
[ "Declare", "a", "queue", ".", "Has", "no", "effect", "if", "a", "queue", "with", "the", "given", "name", "already", "exists", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L183-L224
train
222,095
Bogdanp/dramatiq
dramatiq/brokers/rabbitmq.py
RabbitmqBroker.get_queue_message_counts
def get_queue_message_counts(self, queue_name): """Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed queue and its dead letter queue. """ queue_response = self._declare_queue(queue_name) dq_queue_response = self._declare_dq_queue(queue_name) xq_queue_response = self._declare_xq_queue(queue_name) return ( queue_response.method.message_count, dq_queue_response.method.message_count, xq_queue_response.method.message_count, )
python
def get_queue_message_counts(self, queue_name): """Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed queue and its dead letter queue. """ queue_response = self._declare_queue(queue_name) dq_queue_response = self._declare_dq_queue(queue_name) xq_queue_response = self._declare_xq_queue(queue_name) return ( queue_response.method.message_count, dq_queue_response.method.message_count, xq_queue_response.method.message_count, )
[ "def", "get_queue_message_counts", "(", "self", ",", "queue_name", ")", ":", "queue_response", "=", "self", ".", "_declare_queue", "(", "queue_name", ")", "dq_queue_response", "=", "self", ".", "_declare_dq_queue", "(", "queue_name", ")", "xq_queue_response", "=", ...
Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed queue and its dead letter queue.
[ "Get", "the", "number", "of", "messages", "in", "a", "queue", ".", "This", "method", "is", "only", "meant", "to", "be", "used", "in", "unit", "and", "integration", "tests", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L318-L336
train
222,096
Bogdanp/dramatiq
dramatiq/rate_limits/barrier.py
Barrier.create
def create(self, parties): """Create the barrier for the given number of parties. Parameters: parties(int): The number of parties to wait for. Returns: bool: Whether or not the new barrier was successfully created. """ assert parties > 0, "parties must be a positive integer." return self.backend.add(self.key, parties, self.ttl)
python
def create(self, parties): """Create the barrier for the given number of parties. Parameters: parties(int): The number of parties to wait for. Returns: bool: Whether or not the new barrier was successfully created. """ assert parties > 0, "parties must be a positive integer." return self.backend.add(self.key, parties, self.ttl)
[ "def", "create", "(", "self", ",", "parties", ")", ":", "assert", "parties", ">", "0", ",", "\"parties must be a positive integer.\"", "return", "self", ".", "backend", ".", "add", "(", "self", ".", "key", ",", "parties", ",", "self", ".", "ttl", ")" ]
Create the barrier for the given number of parties. Parameters: parties(int): The number of parties to wait for. Returns: bool: Whether or not the new barrier was successfully created.
[ "Create", "the", "barrier", "for", "the", "given", "number", "of", "parties", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/rate_limits/barrier.py#L50-L60
train
222,097
Bogdanp/dramatiq
dramatiq/rate_limits/barrier.py
Barrier.wait
def wait(self, *, block=True, timeout=None): """Signal that a party has reached the barrier. Warning: Barrier blocking is currently only supported by the stub and Redis backends. Warning: Re-using keys between blocking calls may lead to undefined behaviour. Make sure your barrier keys are always unique (use a UUID). Parameters: block(bool): Whether or not to block while waiting for the other parties. timeout(int): The maximum number of milliseconds to wait for the barrier to be cleared. Returns: bool: Whether or not the barrier has been reached by all parties. """ cleared = not self.backend.decr(self.key, 1, 1, self.ttl) if cleared: self.backend.wait_notify(self.key_events, self.ttl) return True if block: return self.backend.wait(self.key_events, timeout) return False
python
def wait(self, *, block=True, timeout=None): """Signal that a party has reached the barrier. Warning: Barrier blocking is currently only supported by the stub and Redis backends. Warning: Re-using keys between blocking calls may lead to undefined behaviour. Make sure your barrier keys are always unique (use a UUID). Parameters: block(bool): Whether or not to block while waiting for the other parties. timeout(int): The maximum number of milliseconds to wait for the barrier to be cleared. Returns: bool: Whether or not the barrier has been reached by all parties. """ cleared = not self.backend.decr(self.key, 1, 1, self.ttl) if cleared: self.backend.wait_notify(self.key_events, self.ttl) return True if block: return self.backend.wait(self.key_events, timeout) return False
[ "def", "wait", "(", "self", ",", "*", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "cleared", "=", "not", "self", ".", "backend", ".", "decr", "(", "self", ".", "key", ",", "1", ",", "1", ",", "self", ".", "ttl", ")", "if...
Signal that a party has reached the barrier. Warning: Barrier blocking is currently only supported by the stub and Redis backends. Warning: Re-using keys between blocking calls may lead to undefined behaviour. Make sure your barrier keys are always unique (use a UUID). Parameters: block(bool): Whether or not to block while waiting for the other parties. timeout(int): The maximum number of milliseconds to wait for the barrier to be cleared. Returns: bool: Whether or not the barrier has been reached by all parties.
[ "Signal", "that", "a", "party", "has", "reached", "the", "barrier", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/rate_limits/barrier.py#L62-L91
train
222,098
Bogdanp/dramatiq
dramatiq/middleware/threading.py
raise_thread_exception
def raise_thread_exception(thread_id, exception): """Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls. """ if current_platform == "CPython": _raise_thread_exception_cpython(thread_id, exception) else: message = "Setting thread exceptions (%s) is not supported for your current platform (%r)." exctype = (exception if inspect.isclass(exception) else type(exception)).__name__ logger.critical(message, exctype, current_platform)
python
def raise_thread_exception(thread_id, exception): """Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls. """ if current_platform == "CPython": _raise_thread_exception_cpython(thread_id, exception) else: message = "Setting thread exceptions (%s) is not supported for your current platform (%r)." exctype = (exception if inspect.isclass(exception) else type(exception)).__name__ logger.critical(message, exctype, current_platform)
[ "def", "raise_thread_exception", "(", "thread_id", ",", "exception", ")", ":", "if", "current_platform", "==", "\"CPython\"", ":", "_raise_thread_exception_cpython", "(", "thread_id", ",", "exception", ")", "else", ":", "message", "=", "\"Setting thread exceptions (%s) ...
Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls.
[ "Raise", "an", "exception", "in", "a", "thread", "." ]
a8cc2728478e794952a5a50c3fb19ec455fe91b6
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/middleware/threading.py#L43-L59
train
222,099