body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
c9fb6769b830672e45912bea7b49b271c608723f9f0553a73e6e21f36275760a
@staticmethod def _fit_binary(calibrator, X, y, classes=None): '\n Fit a single binary calibrator.\n\n Parameters\n ----------\n calibrator\n X\n y\n classes\n\n Returns\n -------\n\n ' cl = classes[1] X = np.column_stack([np.sum(np.delete(X, cl, axis=1), axis=1), X[(:, cl)]]) unique_y = np.unique(y) if (len(unique_y) == 1): if (classes is not None): if (y[0] == (- 1)): c = 0 else: c = y[0] warnings.warn(('Label %s is present in all training examples.' % str(classes[c]))) calibrator = _ConstantCalibrator().fit(X, unique_y) else: calibrator = clone(calibrator) calibrator.fit(X, y) return calibrator
Fit a single binary calibrator. Parameters ---------- calibrator X y classes Returns -------
pycalib/calibration_methods.py
_fit_binary
JonathanWenger/pycalib
14
python
@staticmethod def _fit_binary(calibrator, X, y, classes=None): '\n Fit a single binary calibrator.\n\n Parameters\n ----------\n calibrator\n X\n y\n classes\n\n Returns\n -------\n\n ' cl = classes[1] X = np.column_stack([np.sum(np.delete(X, cl, axis=1), axis=1), X[(:, cl)]]) unique_y = np.unique(y) if (len(unique_y) == 1): if (classes is not None): if (y[0] == (- 1)): c = 0 else: c = y[0] warnings.warn(('Label %s is present in all training examples.' % str(classes[c]))) calibrator = _ConstantCalibrator().fit(X, unique_y) else: calibrator = clone(calibrator) calibrator.fit(X, y) return calibrator
@staticmethod def _fit_binary(calibrator, X, y, classes=None): '\n Fit a single binary calibrator.\n\n Parameters\n ----------\n calibrator\n X\n y\n classes\n\n Returns\n -------\n\n ' cl = classes[1] X = np.column_stack([np.sum(np.delete(X, cl, axis=1), axis=1), X[(:, cl)]]) unique_y = np.unique(y) if (len(unique_y) == 1): if (classes is not None): if (y[0] == (- 1)): c = 0 else: c = y[0] warnings.warn(('Label %s is present in all training examples.' % str(classes[c]))) calibrator = _ConstantCalibrator().fit(X, unique_y) else: calibrator = clone(calibrator) calibrator.fit(X, y) return calibrator<|docstring|>Fit a single binary calibrator. Parameters ---------- calibrator X y classes Returns -------<|endoftext|>
02b30e62f13d209232f9344c4028034278868b54626fb6efdd6d7e865d2f031b
def empty_safe_bin_mean(a, empty_value): '\n Assign the bin mean to an empty bin. Corresponds to prior assumption of the underlying classifier\n being calibrated.\n ' if (a.size == 0): return empty_value else: return a.mean()
Assign the bin mean to an empty bin. Corresponds to prior assumption of the underlying classifier being calibrated.
pycalib/calibration_methods.py
empty_safe_bin_mean
JonathanWenger/pycalib
14
python
def empty_safe_bin_mean(a, empty_value): '\n Assign the bin mean to an empty bin. Corresponds to prior assumption of the underlying classifier\n being calibrated.\n ' if (a.size == 0): return empty_value else: return a.mean()
def empty_safe_bin_mean(a, empty_value): '\n Assign the bin mean to an empty bin. Corresponds to prior assumption of the underlying classifier\n being calibrated.\n ' if (a.size == 0): return empty_value else: return a.mean()<|docstring|>Assign the bin mean to an empty bin. Corresponds to prior assumption of the underlying classifier being calibrated.<|endoftext|>
467a93081525523b304d37bf4fa30af64184e5a7ebfb8d98be618b7f1a35b50e
def write(filename, mesh, float_fmt='.16e', binary=True): 'Writes msh files, cf.\n <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.\n ' cells = _meshio_to_gmsh_order(mesh.cells) point_data = {} for (key, d) in mesh.point_data.items(): if (key not in ['gmsh:dim_tags']): point_data[key] = d tag_data = {} cell_data = {} for (key, d) in mesh.cell_data.items(): if (key in ['gmsh:physical', 'gmsh:geometrical', 'cell_tags']): tag_data[key] = d else: cell_data[key] = d for tag in ['gmsh:physical', 'gmsh:geometrical']: if (tag not in tag_data): logging.warning(f'Appending zeros to replace the missing {tag[5:]} tag data.') tag_data[tag] = [np.zeros(len(x.data), dtype=c_int) for x in mesh.cells] with open(filename, 'wb') as fh: mode_idx = (1 if binary else 0) size_of_double = 8 fh.write(f'''$MeshFormat 2.2 {mode_idx} {size_of_double} '''.encode()) if binary: np.array([1], dtype=c_int).tofile(fh) fh.write(b'\n') fh.write(b'$EndMeshFormat\n') if mesh.field_data: _write_physical_names(fh, mesh.field_data) _write_nodes(fh, mesh.points, float_fmt, binary) _write_elements(fh, cells, tag_data, binary) if (mesh.gmsh_periodic is not None): _write_periodic(fh, mesh.gmsh_periodic, float_fmt) for (name, dat) in point_data.items(): _write_data(fh, 'NodeData', name, dat, binary) cell_data_raw = raw_from_cell_data(cell_data) for (name, dat) in cell_data_raw.items(): _write_data(fh, 'ElementData', name, dat, binary)
Writes msh files, cf. <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.
src/meshio/gmsh/_gmsh22.py
write
johnkit/meshio
1,209
python
def write(filename, mesh, float_fmt='.16e', binary=True): 'Writes msh files, cf.\n <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.\n ' cells = _meshio_to_gmsh_order(mesh.cells) point_data = {} for (key, d) in mesh.point_data.items(): if (key not in ['gmsh:dim_tags']): point_data[key] = d tag_data = {} cell_data = {} for (key, d) in mesh.cell_data.items(): if (key in ['gmsh:physical', 'gmsh:geometrical', 'cell_tags']): tag_data[key] = d else: cell_data[key] = d for tag in ['gmsh:physical', 'gmsh:geometrical']: if (tag not in tag_data): logging.warning(f'Appending zeros to replace the missing {tag[5:]} tag data.') tag_data[tag] = [np.zeros(len(x.data), dtype=c_int) for x in mesh.cells] with open(filename, 'wb') as fh: mode_idx = (1 if binary else 0) size_of_double = 8 fh.write(f'$MeshFormat 2.2 {mode_idx} {size_of_double} '.encode()) if binary: np.array([1], dtype=c_int).tofile(fh) fh.write(b'\n') fh.write(b'$EndMeshFormat\n') if mesh.field_data: _write_physical_names(fh, mesh.field_data) _write_nodes(fh, mesh.points, float_fmt, binary) _write_elements(fh, cells, tag_data, binary) if (mesh.gmsh_periodic is not None): _write_periodic(fh, mesh.gmsh_periodic, float_fmt) for (name, dat) in point_data.items(): _write_data(fh, 'NodeData', name, dat, binary) cell_data_raw = raw_from_cell_data(cell_data) for (name, dat) in cell_data_raw.items(): _write_data(fh, 'ElementData', name, dat, binary)
def write(filename, mesh, float_fmt='.16e', binary=True): 'Writes msh files, cf.\n <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.\n ' cells = _meshio_to_gmsh_order(mesh.cells) point_data = {} for (key, d) in mesh.point_data.items(): if (key not in ['gmsh:dim_tags']): point_data[key] = d tag_data = {} cell_data = {} for (key, d) in mesh.cell_data.items(): if (key in ['gmsh:physical', 'gmsh:geometrical', 'cell_tags']): tag_data[key] = d else: cell_data[key] = d for tag in ['gmsh:physical', 'gmsh:geometrical']: if (tag not in tag_data): logging.warning(f'Appending zeros to replace the missing {tag[5:]} tag data.') tag_data[tag] = [np.zeros(len(x.data), dtype=c_int) for x in mesh.cells] with open(filename, 'wb') as fh: mode_idx = (1 if binary else 0) size_of_double = 8 fh.write(f'$MeshFormat 2.2 {mode_idx} {size_of_double} '.encode()) if binary: np.array([1], dtype=c_int).tofile(fh) fh.write(b'\n') fh.write(b'$EndMeshFormat\n') if mesh.field_data: _write_physical_names(fh, mesh.field_data) _write_nodes(fh, mesh.points, float_fmt, binary) _write_elements(fh, cells, tag_data, binary) if (mesh.gmsh_periodic is not None): _write_periodic(fh, mesh.gmsh_periodic, float_fmt) for (name, dat) in point_data.items(): _write_data(fh, 'NodeData', name, dat, binary) cell_data_raw = raw_from_cell_data(cell_data) for (name, dat) in cell_data_raw.items(): _write_data(fh, 'ElementData', name, dat, binary)<|docstring|>Writes msh files, cf. <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.<|endoftext|>
829e20985705c95c2b6e4977be78dc1cf4f0c20053ad78d9c214d7e3a55b06ea
def session_preparation(self) -> None: 'FastIron requires to be enable mode to disable paging.' self._test_channel_read() self.set_base_prompt() self.enable() self.disable_paging(command='skip-page-display') time.sleep((0.3 * self.global_delay_factor)) self.clear_buffer()
FastIron requires to be enable mode to disable paging.
netmiko/ruckus/ruckus_fastiron.py
session_preparation
trhoy/netmiko
2,833
python
def session_preparation(self) -> None: self._test_channel_read() self.set_base_prompt() self.enable() self.disable_paging(command='skip-page-display') time.sleep((0.3 * self.global_delay_factor)) self.clear_buffer()
def session_preparation(self) -> None: self._test_channel_read() self.set_base_prompt() self.enable() self.disable_paging(command='skip-page-display') time.sleep((0.3 * self.global_delay_factor)) self.clear_buffer()<|docstring|>FastIron requires to be enable mode to disable paging.<|endoftext|>
77c6a4ff49ec1ff93bc583f64cccbdd69e19dbc64fa84d6d868850add0d5fbd9
def enable(self, cmd: str='enable', pattern: str='(ssword|User Name)', enable_pattern: Optional[str]=None, re_flags: int=re.IGNORECASE) -> str: 'Enter enable mode.\n With RADIUS can prompt for User Name\n SSH@Lab-ICX7250>en\n User Name:service_netmiko\n Password:\n SSH@Lab-ICX7250#\n ' output = '' if (not self.check_enable_mode()): count = 4 i = 1 while (i < count): self.write_channel(self.normalize_cmd(cmd)) new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags, read_entire_line=True) output += new_data if ('User Name' in new_data): self.write_channel(self.normalize_cmd(self.username)) new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags, read_entire_line=True) output += new_data if ('ssword' in new_data): self.write_channel(self.normalize_cmd(self.secret)) new_data = self.read_until_prompt(read_entire_line=True) output += new_data if (not re.search('error.*incorrect.*password', new_data, flags=re.I)): break time.sleep(1) i += 1 if (not self.check_enable_mode()): msg = "Failed to enter enable mode. Please ensure you pass the 'secret' argument to ConnectHandler." raise ValueError(msg) return output
Enter enable mode. With RADIUS can prompt for User Name SSH@Lab-ICX7250>en User Name:service_netmiko Password: SSH@Lab-ICX7250#
netmiko/ruckus/ruckus_fastiron.py
enable
trhoy/netmiko
2,833
python
def enable(self, cmd: str='enable', pattern: str='(ssword|User Name)', enable_pattern: Optional[str]=None, re_flags: int=re.IGNORECASE) -> str: 'Enter enable mode.\n With RADIUS can prompt for User Name\n SSH@Lab-ICX7250>en\n User Name:service_netmiko\n Password:\n SSH@Lab-ICX7250#\n ' output = if (not self.check_enable_mode()): count = 4 i = 1 while (i < count): self.write_channel(self.normalize_cmd(cmd)) new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags, read_entire_line=True) output += new_data if ('User Name' in new_data): self.write_channel(self.normalize_cmd(self.username)) new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags, read_entire_line=True) output += new_data if ('ssword' in new_data): self.write_channel(self.normalize_cmd(self.secret)) new_data = self.read_until_prompt(read_entire_line=True) output += new_data if (not re.search('error.*incorrect.*password', new_data, flags=re.I)): break time.sleep(1) i += 1 if (not self.check_enable_mode()): msg = "Failed to enter enable mode. Please ensure you pass the 'secret' argument to ConnectHandler." raise ValueError(msg) return output
def enable(self, cmd: str='enable', pattern: str='(ssword|User Name)', enable_pattern: Optional[str]=None, re_flags: int=re.IGNORECASE) -> str: 'Enter enable mode.\n With RADIUS can prompt for User Name\n SSH@Lab-ICX7250>en\n User Name:service_netmiko\n Password:\n SSH@Lab-ICX7250#\n ' output = if (not self.check_enable_mode()): count = 4 i = 1 while (i < count): self.write_channel(self.normalize_cmd(cmd)) new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags, read_entire_line=True) output += new_data if ('User Name' in new_data): self.write_channel(self.normalize_cmd(self.username)) new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags, read_entire_line=True) output += new_data if ('ssword' in new_data): self.write_channel(self.normalize_cmd(self.secret)) new_data = self.read_until_prompt(read_entire_line=True) output += new_data if (not re.search('error.*incorrect.*password', new_data, flags=re.I)): break time.sleep(1) i += 1 if (not self.check_enable_mode()): msg = "Failed to enter enable mode. Please ensure you pass the 'secret' argument to ConnectHandler." raise ValueError(msg) return output<|docstring|>Enter enable mode. With RADIUS can prompt for User Name SSH@Lab-ICX7250>en User Name:service_netmiko Password: SSH@Lab-ICX7250#<|endoftext|>
c745e88255d889927b9a81796fbb033536c5ec38aff0c9da2108ad1cdbeaa453
def save_config(self, cmd: str='write mem', confirm: bool=False, confirm_response: str='') -> str: 'Saves configuration.' return super().save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)
Saves configuration.
netmiko/ruckus/ruckus_fastiron.py
save_config
trhoy/netmiko
2,833
python
def save_config(self, cmd: str='write mem', confirm: bool=False, confirm_response: str=) -> str: return super().save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)
def save_config(self, cmd: str='write mem', confirm: bool=False, confirm_response: str=) -> str: return super().save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)<|docstring|>Saves configuration.<|endoftext|>
0471e3d6f2fe8be6a658151d64283e3808f229c60496d76320a4c152c9df9265
def _process_option(self, tsocket: socket, command: bytes, option: bytes) -> None: "\n Ruckus FastIron/ICX does not always echo commands to output by default.\n If server expresses interest in 'ECHO' option, then reply back with 'DO\n ECHO'\n " if (option == ECHO): tsocket.sendall(((IAC + DO) + ECHO)) elif (command in (DO, DONT)): tsocket.sendall(((IAC + WONT) + option)) elif (command in (WILL, WONT)): tsocket.sendall(((IAC + DONT) + option))
Ruckus FastIron/ICX does not always echo commands to output by default. If server expresses interest in 'ECHO' option, then reply back with 'DO ECHO'
netmiko/ruckus/ruckus_fastiron.py
_process_option
trhoy/netmiko
2,833
python
def _process_option(self, tsocket: socket, command: bytes, option: bytes) -> None: "\n Ruckus FastIron/ICX does not always echo commands to output by default.\n If server expresses interest in 'ECHO' option, then reply back with 'DO\n ECHO'\n " if (option == ECHO): tsocket.sendall(((IAC + DO) + ECHO)) elif (command in (DO, DONT)): tsocket.sendall(((IAC + WONT) + option)) elif (command in (WILL, WONT)): tsocket.sendall(((IAC + DONT) + option))
def _process_option(self, tsocket: socket, command: bytes, option: bytes) -> None: "\n Ruckus FastIron/ICX does not always echo commands to output by default.\n If server expresses interest in 'ECHO' option, then reply back with 'DO\n ECHO'\n " if (option == ECHO): tsocket.sendall(((IAC + DO) + ECHO)) elif (command in (DO, DONT)): tsocket.sendall(((IAC + WONT) + option)) elif (command in (WILL, WONT)): tsocket.sendall(((IAC + DONT) + option))<|docstring|>Ruckus FastIron/ICX does not always echo commands to output by default. If server expresses interest in 'ECHO' option, then reply back with 'DO ECHO'<|endoftext|>
b49b8491c5759827e744a59635ab2839533815822449b2fa9e6f6ee38ee73d32
@click.command() @click.option('f', '-f', '--from', prompt='Old username', help='Name to change from.') @click.option('t', '-t', '--to', prompt='New username', help='Name to change to.') @click.option('--bak/--no-bak', default=True, help='Make .bak file for backup. (Defalult is True)') @click.version_option(version=VERSION, prog_name=PROG_NAME) @click.help_option('-h', '--help') def main(f, t, bak): 'GitHub-rename\n\n \x08\n This cli tool helps you to rename your github urls\n in .git/config and .gitmodules.\n\n \x08\n Please help us improve by opening issues\n in https://github.com/qqghst/GitHub-rename\n ' file_name = './.git/config' github_rename(name_before=f, name_after=t, file_name=file_name, bak=bak) file_name = './.gitmodules' github_rename(name_before=f, name_after=t, file_name=file_name, bak=bak)
GitHub-rename  This cli tool helps you to rename your github urls in .git/config and .gitmodules.  Please help us improve by opening issues in https://github.com/qqghst/GitHub-rename
github_rename/clidriver.py
main
qqghst/GitHub-rename
0
python
@click.command() @click.option('f', '-f', '--from', prompt='Old username', help='Name to change from.') @click.option('t', '-t', '--to', prompt='New username', help='Name to change to.') @click.option('--bak/--no-bak', default=True, help='Make .bak file for backup. (Defalult is True)') @click.version_option(version=VERSION, prog_name=PROG_NAME) @click.help_option('-h', '--help') def main(f, t, bak): 'GitHub-rename\n\n \x08\n This cli tool helps you to rename your github urls\n in .git/config and .gitmodules.\n\n \x08\n Please help us improve by opening issues\n in https://github.com/qqghst/GitHub-rename\n ' file_name = './.git/config' github_rename(name_before=f, name_after=t, file_name=file_name, bak=bak) file_name = './.gitmodules' github_rename(name_before=f, name_after=t, file_name=file_name, bak=bak)
@click.command() @click.option('f', '-f', '--from', prompt='Old username', help='Name to change from.') @click.option('t', '-t', '--to', prompt='New username', help='Name to change to.') @click.option('--bak/--no-bak', default=True, help='Make .bak file for backup. (Defalult is True)') @click.version_option(version=VERSION, prog_name=PROG_NAME) @click.help_option('-h', '--help') def main(f, t, bak): 'GitHub-rename\n\n \x08\n This cli tool helps you to rename your github urls\n in .git/config and .gitmodules.\n\n \x08\n Please help us improve by opening issues\n in https://github.com/qqghst/GitHub-rename\n ' file_name = './.git/config' github_rename(name_before=f, name_after=t, file_name=file_name, bak=bak) file_name = './.gitmodules' github_rename(name_before=f, name_after=t, file_name=file_name, bak=bak)<|docstring|>GitHub-rename  This cli tool helps you to rename your github urls in .git/config and .gitmodules.  Please help us improve by opening issues in https://github.com/qqghst/GitHub-rename<|endoftext|>
ac1e7a73c680c9898c88a5f0bc43b9ea0d960d60dda361b3e9354ebccd1d5bcb
def test_create_file(db, clients): '\n Test the create file mutation\n ' client = clients.get('Administrators') study = StudyFactory() version = Version(size=123) version.save() template_version = TemplateVersionFactory() variables = {'version': to_global_id('VersionNode', version.pk), 'name': 'Test file', 'study': to_global_id('StudyNode', study.pk), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2'], 'templateVersion': to_global_id('TemplateVersionNode', template_version.pk)} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) result = resp.json()['data']['createFile']['file'] assert result['id'] assert result['templateVersion']
Test the create file mutation
tests/files/test_create_file.py
test_create_file
kids-first/kf-api-study-creator
3
python
def test_create_file(db, clients): '\n \n ' client = clients.get('Administrators') study = StudyFactory() version = Version(size=123) version.save() template_version = TemplateVersionFactory() variables = {'version': to_global_id('VersionNode', version.pk), 'name': 'Test file', 'study': to_global_id('StudyNode', study.pk), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2'], 'templateVersion': to_global_id('TemplateVersionNode', template_version.pk)} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) result = resp.json()['data']['createFile']['file'] assert result['id'] assert result['templateVersion']
def test_create_file(db, clients): '\n \n ' client = clients.get('Administrators') study = StudyFactory() version = Version(size=123) version.save() template_version = TemplateVersionFactory() variables = {'version': to_global_id('VersionNode', version.pk), 'name': 'Test file', 'study': to_global_id('StudyNode', study.pk), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2'], 'templateVersion': to_global_id('TemplateVersionNode', template_version.pk)} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) result = resp.json()['data']['createFile']['file'] assert result['id'] assert result['templateVersion']<|docstring|>Test the create file mutation<|endoftext|>
4017d35198f32593b4eb4c5ef13f7eb1896717e3e227109e52d7989675b6e392
def test_study_does_not_exist(db, clients): '\n Test that error is returned if the provided study is not found.\n ' client = clients.get('Administrators') study = StudyFactory() version = VersionFactory(study=study) variables = {'version': to_global_id('VersionNode', version.kf_id), 'name': 'Test file', 'study': to_global_id('StudyNode', 'SD_00000000'), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Study does not exist' in resp.json()['errors'][0]['message'])
Test that error is returned if the provided study is not found.
tests/files/test_create_file.py
test_study_does_not_exist
kids-first/kf-api-study-creator
3
python
def test_study_does_not_exist(db, clients): '\n \n ' client = clients.get('Administrators') study = StudyFactory() version = VersionFactory(study=study) variables = {'version': to_global_id('VersionNode', version.kf_id), 'name': 'Test file', 'study': to_global_id('StudyNode', 'SD_00000000'), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Study does not exist' in resp.json()['errors'][0]['message'])
def test_study_does_not_exist(db, clients): '\n \n ' client = clients.get('Administrators') study = StudyFactory() version = VersionFactory(study=study) variables = {'version': to_global_id('VersionNode', version.kf_id), 'name': 'Test file', 'study': to_global_id('StudyNode', 'SD_00000000'), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Study does not exist' in resp.json()['errors'][0]['message'])<|docstring|>Test that error is returned if the provided study is not found.<|endoftext|>
b000a8875dc8e9558d965f47e85995f3042643419788ace6532750397e3d8992
def test_version_does_not_exist(db, clients): '\n Check that an error is returned if the provided version is not found.\n ' client = clients.get('Administrators') variables = {'version': to_global_id('VersionNode', 'FV_00000001'), 'name': 'Test file', 'study': to_global_id('StudyNode', 'SD_00000000'), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Version does not exist' in resp.json()['errors'][0]['message'])
Check that an error is returned if the provided version is not found.
tests/files/test_create_file.py
test_version_does_not_exist
kids-first/kf-api-study-creator
3
python
def test_version_does_not_exist(db, clients): '\n \n ' client = clients.get('Administrators') variables = {'version': to_global_id('VersionNode', 'FV_00000001'), 'name': 'Test file', 'study': to_global_id('StudyNode', 'SD_00000000'), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Version does not exist' in resp.json()['errors'][0]['message'])
def test_version_does_not_exist(db, clients): '\n \n ' client = clients.get('Administrators') variables = {'version': to_global_id('VersionNode', 'FV_00000001'), 'name': 'Test file', 'study': to_global_id('StudyNode', 'SD_00000000'), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Version does not exist' in resp.json()['errors'][0]['message'])<|docstring|>Check that an error is returned if the provided version is not found.<|endoftext|>
ea35a33329251fa3c8f99b1b877927c5322d3b9c9599f638367f29e79e5738a0
def test_version_or_study_required(db, clients): '\n Make sure either a study or version is required.\n ' client = clients.get('Administrators') version = Version(size=123) version.save() variables = {'version': to_global_id('VersionNode', version.kf_id), 'name': 'Test file', 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Study must be specified' in resp.json()['errors'][0]['message'])
Make sure either a study or version is required.
tests/files/test_create_file.py
test_version_or_study_required
kids-first/kf-api-study-creator
3
python
def test_version_or_study_required(db, clients): '\n \n ' client = clients.get('Administrators') version = Version(size=123) version.save() variables = {'version': to_global_id('VersionNode', version.kf_id), 'name': 'Test file', 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Study must be specified' in resp.json()['errors'][0]['message'])
def test_version_or_study_required(db, clients): '\n \n ' client = clients.get('Administrators') version = Version(size=123) version.save() variables = {'version': to_global_id('VersionNode', version.kf_id), 'name': 'Test file', 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2']} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('Study must be specified' in resp.json()['errors'][0]['message'])<|docstring|>Make sure either a study or version is required.<|endoftext|>
9e88b2045845382cf4bb799ca3653474cf086ab3196cbe8e0629465d4dc22462
def test_template_does_not_exist(db, clients): '\n Check that an error is returned if the provided template_version\n is not found.\n ' client = clients.get('Administrators') study = StudyFactory() version = Version(size=123) version.save() variables = {'version': to_global_id('VersionNode', version.pk), 'name': 'Test file', 'study': to_global_id('StudyNode', study.pk), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2'], 'templateVersion': to_global_id('TemplateVersionNode', str(uuid.uuid4()))} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('TemplateVersion' in resp.json()['errors'][0]['message'])
Check that an error is returned if the provided template_version is not found.
tests/files/test_create_file.py
test_template_does_not_exist
kids-first/kf-api-study-creator
3
python
def test_template_does_not_exist(db, clients): '\n Check that an error is returned if the provided template_version\n is not found.\n ' client = clients.get('Administrators') study = StudyFactory() version = Version(size=123) version.save() variables = {'version': to_global_id('VersionNode', version.pk), 'name': 'Test file', 'study': to_global_id('StudyNode', study.pk), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2'], 'templateVersion': to_global_id('TemplateVersionNode', str(uuid.uuid4()))} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('TemplateVersion' in resp.json()['errors'][0]['message'])
def test_template_does_not_exist(db, clients): '\n Check that an error is returned if the provided template_version\n is not found.\n ' client = clients.get('Administrators') study = StudyFactory() version = Version(size=123) version.save() variables = {'version': to_global_id('VersionNode', version.pk), 'name': 'Test file', 'study': to_global_id('StudyNode', study.pk), 'description': 'This is my test file', 'fileType': 'OTH', 'tags': ['tag1', 'tag2'], 'templateVersion': to_global_id('TemplateVersionNode', str(uuid.uuid4()))} data = {'query': CREATE_FILE, 'variables': variables} resp = client.post('/graphql', content_type='application/json', data=data) assert ('errors' in resp.json()) assert ('TemplateVersion' in resp.json()['errors'][0]['message'])<|docstring|>Check that an error is returned if the provided template_version is not found.<|endoftext|>
d45e05e4b706455ae50373dd8dd6f0dc02af277fda6aed5fa9471510687ad2d5
def topk(self, address2id_ptr, precomputed, cell_ptr, cell_size, cell_capacity, n_probe_list, n_candidates=None): '\n data: shape=[n_subvectors // n_cs, n_data, n_cs], dtype=uint8\n precomputed: shape=[n_query, n_clusters], dtype=float32\n is_empty: shape=[n_data], dtype=uint8\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (precomputed.shape == (self.m, n_query, self.k)) assert (precomputed.dtype == torch.float32) assert (cell_size.shape[1] == n_probe) assert (cell_capacity.shape[1] == n_probe) assert (cell_ptr.dtype == cell_size.dtype == cell_capacity.dtype == torch.int64) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, args=[address2id_ptr.data_ptr(), precomputed.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])
data: shape=[n_subvectors // n_cs, n_data, n_cs], dtype=uint8 precomputed: shape=[n_query, n_clusters], dtype=float32 is_empty: shape=[n_data], dtype=uint8 cell_ptr: shape=[n_query, max_n_probe], dtype=int64 cell_size: shape=[n_query, max_n_probe], dtype=int64 cell_capacity: shape=[n_query, max_n_probe], dtype=int64 n_probe_list: shape=[n_query], dtype=int64 n_candidates: int, `k` in topk
torchpq/kernels/DistributedIVFPQTopkCuda.py
topk
DeMoriarty/TorchPQ
103
python
def topk(self, address2id_ptr, precomputed, cell_ptr, cell_size, cell_capacity, n_probe_list, n_candidates=None): '\n data: shape=[n_subvectors // n_cs, n_data, n_cs], dtype=uint8\n precomputed: shape=[n_query, n_clusters], dtype=float32\n is_empty: shape=[n_data], dtype=uint8\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (precomputed.shape == (self.m, n_query, self.k)) assert (precomputed.dtype == torch.float32) assert (cell_size.shape[1] == n_probe) assert (cell_capacity.shape[1] == n_probe) assert (cell_ptr.dtype == cell_size.dtype == cell_capacity.dtype == torch.int64) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, args=[address2id_ptr.data_ptr(), precomputed.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])
def topk(self, address2id_ptr, precomputed, cell_ptr, cell_size, cell_capacity, n_probe_list, n_candidates=None): '\n data: shape=[n_subvectors // n_cs, n_data, n_cs], dtype=uint8\n precomputed: shape=[n_query, n_clusters], dtype=float32\n is_empty: shape=[n_data], dtype=uint8\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (precomputed.shape == (self.m, n_query, self.k)) assert (precomputed.dtype == torch.float32) assert (cell_size.shape[1] == n_probe) assert (cell_capacity.shape[1] == n_probe) assert (cell_ptr.dtype == cell_size.dtype == cell_capacity.dtype == torch.int64) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, args=[address2id_ptr.data_ptr(), precomputed.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])<|docstring|>data: shape=[n_subvectors // n_cs, n_data, n_cs], 
dtype=uint8 precomputed: shape=[n_query, n_clusters], dtype=float32 is_empty: shape=[n_data], dtype=uint8 cell_ptr: shape=[n_query, max_n_probe], dtype=int64 cell_size: shape=[n_query, max_n_probe], dtype=int64 cell_capacity: shape=[n_query, max_n_probe], dtype=int64 n_probe_list: shape=[n_query], dtype=int64 n_candidates: int, `k` in topk<|endoftext|>
963e382f6c8bb523ed2986abfcd4c7e41b752384a281ff55b67982964c3d8b80
def topk_residual(self, address2id_ptr, precomputed, cell_ptr, cell_size, cell_capacity, base_sims, n_probe_list, n_candidates=None): '\n precomputed: shape=[n_query, max_n_probe, n_subvectors, n_clusters], dtype=float32\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n base_sims: shape=[n_query, max_n_probe], dtype=float32\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (precomputed.shape == (n_query, n_probe, self.m, self.k)) assert (precomputed.dtype == torch.float32) assert (cell_capacity.shape == (n_query, n_probe)) assert (cell_size.shape == (n_query, n_probe)) assert (cell_ptr.dtype == cell_size.dtype == cell_capacity.dtype == torch.int64) assert (base_sims.shape == (n_query, n_probe)) assert (base_sims.dtype == torch.float32) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) precomputed = precomputed.contiguous() base_sims = base_sims.contiguous() if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_residual_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, args=[address2id_ptr.data_ptr(), precomputed.data_ptr(), base_sims.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), 
indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])
precomputed: shape=[n_query, max_n_probe, n_subvectors, n_clusters], dtype=float32 cell_ptr: shape=[n_query, max_n_probe], dtype=int64 cell_size: shape=[n_query, max_n_probe], dtype=int64 cell_capacity: shape=[n_query, max_n_probe], dtype=int64 base_sims: shape=[n_query, max_n_probe], dtype=float32 n_probe_list: shape=[n_query], dtype=int64 n_candidates: int, `k` in topk
torchpq/kernels/DistributedIVFPQTopkCuda.py
topk_residual
DeMoriarty/TorchPQ
103
python
def topk_residual(self, address2id_ptr, precomputed, cell_ptr, cell_size, cell_capacity, base_sims, n_probe_list, n_candidates=None): '\n precomputed: shape=[n_query, max_n_probe, n_subvectors, n_clusters], dtype=float32\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n base_sims: shape=[n_query, max_n_probe], dtype=float32\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (precomputed.shape == (n_query, n_probe, self.m, self.k)) assert (precomputed.dtype == torch.float32) assert (cell_capacity.shape == (n_query, n_probe)) assert (cell_size.shape == (n_query, n_probe)) assert (cell_ptr.dtype == cell_size.dtype == cell_capacity.dtype == torch.int64) assert (base_sims.shape == (n_query, n_probe)) assert (base_sims.dtype == torch.float32) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) precomputed = precomputed.contiguous() base_sims = base_sims.contiguous() if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_residual_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, args=[address2id_ptr.data_ptr(), precomputed.data_ptr(), base_sims.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), 
indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])
def topk_residual(self, address2id_ptr, precomputed, cell_ptr, cell_size, cell_capacity, base_sims, n_probe_list, n_candidates=None): '\n precomputed: shape=[n_query, max_n_probe, n_subvectors, n_clusters], dtype=float32\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n base_sims: shape=[n_query, max_n_probe], dtype=float32\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (precomputed.shape == (n_query, n_probe, self.m, self.k)) assert (precomputed.dtype == torch.float32) assert (cell_capacity.shape == (n_query, n_probe)) assert (cell_size.shape == (n_query, n_probe)) assert (cell_ptr.dtype == cell_size.dtype == cell_capacity.dtype == torch.int64) assert (base_sims.shape == (n_query, n_probe)) assert (base_sims.dtype == torch.float32) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) precomputed = precomputed.contiguous() base_sims = base_sims.contiguous() if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_residual_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, args=[address2id_ptr.data_ptr(), precomputed.data_ptr(), base_sims.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), 
indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])<|docstring|>precomputed: shape=[n_query, max_n_probe, n_subvectors, n_clusters], dtype=float32 cell_ptr: shape=[n_query, max_n_probe], dtype=int64 cell_size: shape=[n_query, max_n_probe], dtype=int64 cell_capacity: shape=[n_query, max_n_probe], dtype=int64 base_sims: shape=[n_query, max_n_probe], dtype=float32 n_probe_list: shape=[n_query], dtype=int64 n_candidates: int, `k` in topk<|endoftext|>
226804f09e2493132e363dc7cc30c9c6150d11cce34cab33cd6b30aef221c73a
def topk_residual_precomputed(self, address2id_ptr, part1, part2, cells, base_sims, cell_ptr, cell_size, cell_capacity, n_probe_list, n_candidates=None): '\n address2id_ptr: shape=[n_query, max_n_probe], dtype=int64\n part1: shape=[n_query, n_subvectors, n_pq_clusters], dtype=float32\n part2: shape=[n_cells, n_subvectors, n_pq_clusters], dtype=float32\n cells: shape=[n_query, max_n_probe], dtype=int64\n base_sims: shape=[n_query, max_n_probe], dtype=float32\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (cell_capacity.shape == (n_query, n_probe)) assert (cell_size.shape == (n_query, n_probe)) assert (base_sims.shape == (n_query, n_probe)) assert (cell_ptr.dtype == cell_size.dtype == torch.int64) assert (part1.dtype == part2.dtype == torch.float32) assert (base_sims.dtype == torch.float32) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) part1 = part1.contiguous() part2 = part2.contiguous() cells = cells.contiguous() base_sims = base_sims.contiguous() if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_residual_precomputed_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, 
args=[address2id_ptr.data_ptr(), part1.data_ptr(), part2.data_ptr(), cells.data_ptr(), base_sims.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])
address2id_ptr: shape=[n_query, max_n_probe], dtype=int64 part1: shape=[n_query, n_subvectors, n_pq_clusters], dtype=float32 part2: shape=[n_cells, n_subvectors, n_pq_clusters], dtype=float32 cells: shape=[n_query, max_n_probe], dtype=int64 base_sims: shape=[n_query, max_n_probe], dtype=float32 cell_ptr: shape=[n_query, max_n_probe], dtype=int64 cell_size: shape=[n_query, max_n_probe], dtype=int64 cell_capacity: shape=[n_query, max_n_probe], dtype=int64 n_probe_list: shape=[n_query], dtype=int64 n_candidates: int, `k` in topk
torchpq/kernels/DistributedIVFPQTopkCuda.py
topk_residual_precomputed
DeMoriarty/TorchPQ
103
python
def topk_residual_precomputed(self, address2id_ptr, part1, part2, cells, base_sims, cell_ptr, cell_size, cell_capacity, n_probe_list, n_candidates=None): '\n address2id_ptr: shape=[n_query, max_n_probe], dtype=int64\n part1: shape=[n_query, n_subvectors, n_pq_clusters], dtype=float32\n part2: shape=[n_cells, n_subvectors, n_pq_clusters], dtype=float32\n cells: shape=[n_query, max_n_probe], dtype=int64\n base_sims: shape=[n_query, max_n_probe], dtype=float32\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (cell_capacity.shape == (n_query, n_probe)) assert (cell_size.shape == (n_query, n_probe)) assert (base_sims.shape == (n_query, n_probe)) assert (cell_ptr.dtype == cell_size.dtype == torch.int64) assert (part1.dtype == part2.dtype == torch.float32) assert (base_sims.dtype == torch.float32) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) part1 = part1.contiguous() part2 = part2.contiguous() cells = cells.contiguous() base_sims = base_sims.contiguous() if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_residual_precomputed_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, 
args=[address2id_ptr.data_ptr(), part1.data_ptr(), part2.data_ptr(), cells.data_ptr(), base_sims.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])
def topk_residual_precomputed(self, address2id_ptr, part1, part2, cells, base_sims, cell_ptr, cell_size, cell_capacity, n_probe_list, n_candidates=None): '\n address2id_ptr: shape=[n_query, max_n_probe], dtype=int64\n part1: shape=[n_query, n_subvectors, n_pq_clusters], dtype=float32\n part2: shape=[n_cells, n_subvectors, n_pq_clusters], dtype=float32\n cells: shape=[n_query, max_n_probe], dtype=int64\n base_sims: shape=[n_query, max_n_probe], dtype=float32\n cell_ptr: shape=[n_query, max_n_probe], dtype=int64\n cell_size: shape=[n_query, max_n_probe], dtype=int64\n cell_capacity: shape=[n_query, max_n_probe], dtype=int64\n n_probe_list: shape=[n_query], dtype=int64\n n_candidates: int, `k` in topk\n ' n_query = cell_ptr.shape[0] n_probe = cell_ptr.shape[1] assert (cell_capacity.shape == (n_query, n_probe)) assert (cell_size.shape == (n_query, n_probe)) assert (base_sims.shape == (n_query, n_probe)) assert (cell_ptr.dtype == cell_size.dtype == torch.int64) assert (part1.dtype == part2.dtype == torch.float32) assert (base_sims.dtype == torch.float32) assert (n_probe_list.shape == (n_query,)) assert (n_probe_list.dtype == torch.int64) part1 = part1.contiguous() part2 = part2.contiguous() cells = cells.contiguous() base_sims = base_sims.contiguous() if (n_candidates is None): n_candidates = self.tpb else: assert (n_candidates <= self.tpb) n_candidates_pow_of_2 = (2 * self.next_power_of_2(math.ceil((n_candidates / 2)))) assert (n_candidates_pow_of_2 in [(2 * (2 ** i)) for i in range(10)]) cell_info = torch.stack([cell_size, cell_ptr, cell_capacity], dim=(- 1)) tot_size = cell_size.sum(dim=1) values = torch.empty(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.float32) values.fill_(float('-inf')) indices = torch.zeros(n_query, n_candidates_pow_of_2, device='cuda:0', dtype=torch.int64) threads_per_block = (self.tpb,) blocks_per_grid = (n_query,) self._topk_residual_precomputed_fn(grid=blocks_per_grid, block=threads_per_block, shared_mem=self.sm_size, 
args=[address2id_ptr.data_ptr(), part1.data_ptr(), part2.data_ptr(), cells.data_ptr(), base_sims.data_ptr(), cell_info.data_ptr(), tot_size.data_ptr(), n_probe_list.data_ptr(), values.data_ptr(), indices.data_ptr(), n_query, n_probe, n_candidates_pow_of_2], stream=self.stream) return (values[(:, :n_candidates)], indices[(:, :n_candidates)])<|docstring|>address2id_ptr: shape=[n_query, max_n_probe], dtype=int64 part1: shape=[n_query, n_subvectors, n_pq_clusters], dtype=float32 part2: shape=[n_cells, n_subvectors, n_pq_clusters], dtype=float32 cells: shape=[n_query, max_n_probe], dtype=int64 base_sims: shape=[n_query, max_n_probe], dtype=float32 cell_ptr: shape=[n_query, max_n_probe], dtype=int64 cell_size: shape=[n_query, max_n_probe], dtype=int64 cell_capacity: shape=[n_query, max_n_probe], dtype=int64 n_probe_list: shape=[n_query], dtype=int64 n_candidates: int, `k` in topk<|endoftext|>
c58f2510a13ec7944bf98d2518b71a7c21a5f485d0924bae051a6cc7537050a6
def test_db_connection_defaults(self): ' Test that the default connection details get set correctly when no env set. ' db_connection_details = db_utils.get_default_connection_details() self.assertEqual(db_connection_details, {'host': '127.0.0.1', 'user': 'vagrant', 'password': 'vagrant', 'database': 'thanatos'})
Test that the default connection details get set correctly when no env set.
tests/database/test_db_utils.py
test_db_connection_defaults
evetrivia/thanatos
1
python
def test_db_connection_defaults(self): ' ' db_connection_details = db_utils.get_default_connection_details() self.assertEqual(db_connection_details, {'host': '127.0.0.1', 'user': 'vagrant', 'password': 'vagrant', 'database': 'thanatos'})
def test_db_connection_defaults(self): ' ' db_connection_details = db_utils.get_default_connection_details() self.assertEqual(db_connection_details, {'host': '127.0.0.1', 'user': 'vagrant', 'password': 'vagrant', 'database': 'thanatos'})<|docstring|>Test that the default connection details get set correctly when no env set.<|endoftext|>
a1b2f2b7d551633835aba906e75e0fcb68263b4e5fd4dfc76b19270f127998d7
def main(): '\n This is the main driver for the Guess The Karma example. The driver function cycles through four different combinations of ScoringFunctions and Combiners\n ' max_k = 3 max_items = 20 bootstrap_samples = 5 num_processors = 2 combiner = AnonymousBayesianCombiner(allowable_labels=['l', 'r']) scorer = CrossEntropyScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = FrequencyCombiner(allowable_labels=['l', 'r']) scorer = CrossEntropyScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = PluralityVote(allowable_labels=['l', 'r']) scorer = F1Score() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = PluralityVote(allowable_labels=['l', 'r']) scorer = AgreementScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors)
This is the main driver for the Guess The Karma example. The driver function cycles through four different combinations of ScoringFunctions and Combiners
surveyequivalence/examples/guessthekarma.py
main
DavidXu999/surveyequivalence
0
python
def main(): '\n \n ' max_k = 3 max_items = 20 bootstrap_samples = 5 num_processors = 2 combiner = AnonymousBayesianCombiner(allowable_labels=['l', 'r']) scorer = CrossEntropyScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = FrequencyCombiner(allowable_labels=['l', 'r']) scorer = CrossEntropyScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = PluralityVote(allowable_labels=['l', 'r']) scorer = F1Score() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = PluralityVote(allowable_labels=['l', 'r']) scorer = AgreementScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors)
def main(): '\n \n ' max_k = 3 max_items = 20 bootstrap_samples = 5 num_processors = 2 combiner = AnonymousBayesianCombiner(allowable_labels=['l', 'r']) scorer = CrossEntropyScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = FrequencyCombiner(allowable_labels=['l', 'r']) scorer = CrossEntropyScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = PluralityVote(allowable_labels=['l', 'r']) scorer = F1Score() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors) combiner = PluralityVote(allowable_labels=['l', 'r']) scorer = AgreementScore() run(combiner=combiner, scorer=scorer, max_k=max_k, max_items=max_items, bootstrap_samples=bootstrap_samples, num_processors=num_processors)<|docstring|>This is the main driver for the Guess The Karma example. The driver function cycles through four different combinations of ScoringFunctions and Combiners<|endoftext|>
0b8091f344217150bf13ecba87161e78288d3ac85e974c0d962217a3c2749a48
def run(combiner: Combiner, scorer: Scorer, max_k: int, max_items: int, bootstrap_samples: int, num_processors: int): '\n Run GuessTheKarma example with provided combiner and scorer.\n\n With GuessTheKarma data we have annotations for image pairs (items) from non-anonymous raters. In addition, each\n "correct" item is annotated as \'A\' (\'B\' is therefore the incorrect answer). So to balance the dataset we\n randomly swap \'A\' for \'B\'.\n\n Parameters\n ----------\n combiner : Combiner\n Combiner function\n scorer : Scorer\n Scoring function\n max_k : int\n Maximum number of raters to use when calculating survey power curve. Lower values dramatically speed up execution of the procedure. No default is set, but this value is typically equal to the average number of raters per item.\n max_items : int\n Maximum items to use from the dataset. Fewer items increases the speed of the procedure by results in loss of statistical power. No default is set. If this value is smaller than the number of items in the dataset then the function will only take the first max_items items from the dataset thereby ignoring some data.\n bootstrap_samples : int\n Number of samples to use when calculating survey equivalence. Like the number of samples in a t-test, more samples increases the statistical power, but each requires additional computational time. No default is set.\n num_processors : int\n Number of processors to use for parallel processing\n\n Notes\n -----\n This function uses data collected by the GuessTheKarma Web game [1]_ to generate survey equivalence values.\n\n References\n ----------\n .. [1] Glenski, M., Stoddard, G., Resnick, P., & Weninger, T. (2018). Guessthekarma: A game to assess social rating systems. 
Proceedings of the ACM on Human-Computer Interaction, 2(CSCW), 1-15.\n ' gtk = pd.read_csv(f'data/vote_gtk2.csv') prefer_W = dict() flip_dict = dict() for (index, rating) in gtk.iterrows(): if (rating['image_pair'] not in prefer_W): flip_dict[rating['image_pair']] = choice([True, False]) if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']] = list('r') else: prefer_W[rating['image_pair']] = list('l') rater_opinion = rating['opinion_choice'] if (rater_opinion == 'A'): if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']].append('r') else: prefer_W[rating['image_pair']].append('l') elif (rater_opinion == 'B'): if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']].append('l') else: prefer_W[rating['image_pair']].append('r') else: pass x = list(prefer_W.values()) length = max(map(len, x)) W = np.array([(xi + ([None] * (length - len(xi)))) for xi in x]) print('##GUESSTHEKARMA - Dataset loaded##', len(W)) W = pd.DataFrame(data=W)[:max_items] W = W.rename(columns={0: 'hard classifier'}) calibrated_predictions_l = W[(W['hard classifier'] == 'l')][W.columns.difference(['hard classifier'])].apply(pd.Series.value_counts, normalize=True, axis=1).fillna(0).mean(axis=0) calibrated_predictions_r = W[(W['hard classifier'] == 'r')][W.columns.difference(['hard classifier'])].apply(pd.Series.value_counts, normalize=True, axis=1).fillna(0).mean(axis=0) print(calibrated_predictions_l, calibrated_predictions_r) classifier = pd.DataFrame([(DiscreteDistributionPrediction(['l', 'r'], [calibrated_predictions_l['l'], calibrated_predictions_l['r']], normalize=False) if (reddit == 'l') else DiscreteDistributionPrediction(['l', 'r'], [calibrated_predictions_r['l'], calibrated_predictions_r['r']], normalize=False)) for reddit in W['hard classifier']], columns=['Reddit Scores Classifier']) W = W.drop(['hard classifier'], axis=1) if (type(scorer) is CrossEntropyScore): prior = AnalysisPipeline(W, combiner=AnonymousBayesianCombiner(allowable_labels=['l', 
'r']), scorer=scorer, allowable_labels=['l', 'r'], num_bootstrap_item_samples=0, verbosity=1, classifier_predictions=classifier, max_K=1, anonymous_raters=True, procs=num_processors) else: prior = None p = AnalysisPipeline(W, combiner=combiner, scorer=scorer, allowable_labels=['l', 'r'], num_bootstrap_item_samples=bootstrap_samples, verbosity=1, classifier_predictions=classifier, max_K=max_k, anonymous_raters=True, procs=num_processors) p.save(path=p.path_for_saving(f'GTK/{combiner.__class__.__name__}_plus_{scorer.__class__.__name__}'), msg=f''' Running GuessTheKarma experiment with {len(W)} items and {len(W.columns)} raters per item {bootstrap_samples} bootstrap itemsets {combiner.__class__.__name__} with {scorer.__class__.__name__}. ''') (fig, ax) = plt.subplots() fig.set_size_inches(8.5, 10.5) pl = Plot(ax, p.expert_power_curve, classifier_scores=p.classifier_scores, y_axis_label='score', center_on=(prior.expert_power_curve.values[0] if (prior is not None) else None), name=f'GTK {type(combiner).__name__}_plus_{type(scorer).__name__}', legend_label='k raters', generate_pgf=True) pl.plot(include_classifiers=True, include_classifier_equivalences=True, include_droplines=True, include_expert_points='all', connect_expert_points=True, include_classifier_cis=True) pl.save(p.path_for_saving(f'GTK/{type(combiner).__name__}_plus_{type(scorer).__name__}'), fig=fig)
Run GuessTheKarma example with provided combiner and scorer. With GuessTheKarma data we have annotations for image pairs (items) from non-anonymous raters. In addition, each "correct" item is annotated as 'A' ('B' is therefore the incorrect answer). So to balance the dataset we randomly swap 'A' for 'B'. Parameters ---------- combiner : Combiner Combiner function scorer : Scorer Scoring function max_k : int Maximum number of raters to use when calculating survey power curve. Lower values dramatically speed up execution of the procedure. No default is set, but this value is typically equal to the average number of raters per item. max_items : int Maximum items to use from the dataset. Fewer items increases the speed of the procedure by results in loss of statistical power. No default is set. If this value is smaller than the number of items in the dataset then the function will only take the first max_items items from the dataset thereby ignoring some data. bootstrap_samples : int Number of samples to use when calculating survey equivalence. Like the number of samples in a t-test, more samples increases the statistical power, but each requires additional computational time. No default is set. num_processors : int Number of processors to use for parallel processing Notes ----- This function uses data collected by the GuessTheKarma Web game [1]_ to generate survey equivalence values. References ---------- .. [1] Glenski, M., Stoddard, G., Resnick, P., & Weninger, T. (2018). Guessthekarma: A game to assess social rating systems. Proceedings of the ACM on Human-Computer Interaction, 2(CSCW), 1-15.
surveyequivalence/examples/guessthekarma.py
run
DavidXu999/surveyequivalence
0
python
def run(combiner: Combiner, scorer: Scorer, max_k: int, max_items: int, bootstrap_samples: int, num_processors: int): '\n Run GuessTheKarma example with provided combiner and scorer.\n\n With GuessTheKarma data we have annotations for image pairs (items) from non-anonymous raters. In addition, each\n "correct" item is annotated as \'A\' (\'B\' is therefore the incorrect answer). So to balance the dataset we\n randomly swap \'A\' for \'B\'.\n\n Parameters\n ----------\n combiner : Combiner\n Combiner function\n scorer : Scorer\n Scoring function\n max_k : int\n Maximum number of raters to use when calculating survey power curve. Lower values dramatically speed up execution of the procedure. No default is set, but this value is typically equal to the average number of raters per item.\n max_items : int\n Maximum items to use from the dataset. Fewer items increases the speed of the procedure by results in loss of statistical power. No default is set. If this value is smaller than the number of items in the dataset then the function will only take the first max_items items from the dataset thereby ignoring some data.\n bootstrap_samples : int\n Number of samples to use when calculating survey equivalence. Like the number of samples in a t-test, more samples increases the statistical power, but each requires additional computational time. No default is set.\n num_processors : int\n Number of processors to use for parallel processing\n\n Notes\n -----\n This function uses data collected by the GuessTheKarma Web game [1]_ to generate survey equivalence values.\n\n References\n ----------\n .. [1] Glenski, M., Stoddard, G., Resnick, P., & Weninger, T. (2018). Guessthekarma: A game to assess social rating systems. 
Proceedings of the ACM on Human-Computer Interaction, 2(CSCW), 1-15.\n ' gtk = pd.read_csv(f'data/vote_gtk2.csv') prefer_W = dict() flip_dict = dict() for (index, rating) in gtk.iterrows(): if (rating['image_pair'] not in prefer_W): flip_dict[rating['image_pair']] = choice([True, False]) if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']] = list('r') else: prefer_W[rating['image_pair']] = list('l') rater_opinion = rating['opinion_choice'] if (rater_opinion == 'A'): if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']].append('r') else: prefer_W[rating['image_pair']].append('l') elif (rater_opinion == 'B'): if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']].append('l') else: prefer_W[rating['image_pair']].append('r') else: pass x = list(prefer_W.values()) length = max(map(len, x)) W = np.array([(xi + ([None] * (length - len(xi)))) for xi in x]) print('##GUESSTHEKARMA - Dataset loaded##', len(W)) W = pd.DataFrame(data=W)[:max_items] W = W.rename(columns={0: 'hard classifier'}) calibrated_predictions_l = W[(W['hard classifier'] == 'l')][W.columns.difference(['hard classifier'])].apply(pd.Series.value_counts, normalize=True, axis=1).fillna(0).mean(axis=0) calibrated_predictions_r = W[(W['hard classifier'] == 'r')][W.columns.difference(['hard classifier'])].apply(pd.Series.value_counts, normalize=True, axis=1).fillna(0).mean(axis=0) print(calibrated_predictions_l, calibrated_predictions_r) classifier = pd.DataFrame([(DiscreteDistributionPrediction(['l', 'r'], [calibrated_predictions_l['l'], calibrated_predictions_l['r']], normalize=False) if (reddit == 'l') else DiscreteDistributionPrediction(['l', 'r'], [calibrated_predictions_r['l'], calibrated_predictions_r['r']], normalize=False)) for reddit in W['hard classifier']], columns=['Reddit Scores Classifier']) W = W.drop(['hard classifier'], axis=1) if (type(scorer) is CrossEntropyScore): prior = AnalysisPipeline(W, combiner=AnonymousBayesianCombiner(allowable_labels=['l', 
'r']), scorer=scorer, allowable_labels=['l', 'r'], num_bootstrap_item_samples=0, verbosity=1, classifier_predictions=classifier, max_K=1, anonymous_raters=True, procs=num_processors) else: prior = None p = AnalysisPipeline(W, combiner=combiner, scorer=scorer, allowable_labels=['l', 'r'], num_bootstrap_item_samples=bootstrap_samples, verbosity=1, classifier_predictions=classifier, max_K=max_k, anonymous_raters=True, procs=num_processors) p.save(path=p.path_for_saving(f'GTK/{combiner.__class__.__name__}_plus_{scorer.__class__.__name__}'), msg=f' Running GuessTheKarma experiment with {len(W)} items and {len(W.columns)} raters per item {bootstrap_samples} bootstrap itemsets {combiner.__class__.__name__} with {scorer.__class__.__name__}. ') (fig, ax) = plt.subplots() fig.set_size_inches(8.5, 10.5) pl = Plot(ax, p.expert_power_curve, classifier_scores=p.classifier_scores, y_axis_label='score', center_on=(prior.expert_power_curve.values[0] if (prior is not None) else None), name=f'GTK {type(combiner).__name__}_plus_{type(scorer).__name__}', legend_label='k raters', generate_pgf=True) pl.plot(include_classifiers=True, include_classifier_equivalences=True, include_droplines=True, include_expert_points='all', connect_expert_points=True, include_classifier_cis=True) pl.save(p.path_for_saving(f'GTK/{type(combiner).__name__}_plus_{type(scorer).__name__}'), fig=fig)
def run(combiner: Combiner, scorer: Scorer, max_k: int, max_items: int, bootstrap_samples: int, num_processors: int): '\n Run GuessTheKarma example with provided combiner and scorer.\n\n With GuessTheKarma data we have annotations for image pairs (items) from non-anonymous raters. In addition, each\n "correct" item is annotated as \'A\' (\'B\' is therefore the incorrect answer). So to balance the dataset we\n randomly swap \'A\' for \'B\'.\n\n Parameters\n ----------\n combiner : Combiner\n Combiner function\n scorer : Scorer\n Scoring function\n max_k : int\n Maximum number of raters to use when calculating survey power curve. Lower values dramatically speed up execution of the procedure. No default is set, but this value is typically equal to the average number of raters per item.\n max_items : int\n Maximum items to use from the dataset. Fewer items increases the speed of the procedure by results in loss of statistical power. No default is set. If this value is smaller than the number of items in the dataset then the function will only take the first max_items items from the dataset thereby ignoring some data.\n bootstrap_samples : int\n Number of samples to use when calculating survey equivalence. Like the number of samples in a t-test, more samples increases the statistical power, but each requires additional computational time. No default is set.\n num_processors : int\n Number of processors to use for parallel processing\n\n Notes\n -----\n This function uses data collected by the GuessTheKarma Web game [1]_ to generate survey equivalence values.\n\n References\n ----------\n .. [1] Glenski, M., Stoddard, G., Resnick, P., & Weninger, T. (2018). Guessthekarma: A game to assess social rating systems. 
Proceedings of the ACM on Human-Computer Interaction, 2(CSCW), 1-15.\n ' gtk = pd.read_csv(f'data/vote_gtk2.csv') prefer_W = dict() flip_dict = dict() for (index, rating) in gtk.iterrows(): if (rating['image_pair'] not in prefer_W): flip_dict[rating['image_pair']] = choice([True, False]) if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']] = list('r') else: prefer_W[rating['image_pair']] = list('l') rater_opinion = rating['opinion_choice'] if (rater_opinion == 'A'): if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']].append('r') else: prefer_W[rating['image_pair']].append('l') elif (rater_opinion == 'B'): if flip_dict[rating['image_pair']]: prefer_W[rating['image_pair']].append('l') else: prefer_W[rating['image_pair']].append('r') else: pass x = list(prefer_W.values()) length = max(map(len, x)) W = np.array([(xi + ([None] * (length - len(xi)))) for xi in x]) print('##GUESSTHEKARMA - Dataset loaded##', len(W)) W = pd.DataFrame(data=W)[:max_items] W = W.rename(columns={0: 'hard classifier'}) calibrated_predictions_l = W[(W['hard classifier'] == 'l')][W.columns.difference(['hard classifier'])].apply(pd.Series.value_counts, normalize=True, axis=1).fillna(0).mean(axis=0) calibrated_predictions_r = W[(W['hard classifier'] == 'r')][W.columns.difference(['hard classifier'])].apply(pd.Series.value_counts, normalize=True, axis=1).fillna(0).mean(axis=0) print(calibrated_predictions_l, calibrated_predictions_r) classifier = pd.DataFrame([(DiscreteDistributionPrediction(['l', 'r'], [calibrated_predictions_l['l'], calibrated_predictions_l['r']], normalize=False) if (reddit == 'l') else DiscreteDistributionPrediction(['l', 'r'], [calibrated_predictions_r['l'], calibrated_predictions_r['r']], normalize=False)) for reddit in W['hard classifier']], columns=['Reddit Scores Classifier']) W = W.drop(['hard classifier'], axis=1) if (type(scorer) is CrossEntropyScore): prior = AnalysisPipeline(W, combiner=AnonymousBayesianCombiner(allowable_labels=['l', 
'r']), scorer=scorer, allowable_labels=['l', 'r'], num_bootstrap_item_samples=0, verbosity=1, classifier_predictions=classifier, max_K=1, anonymous_raters=True, procs=num_processors) else: prior = None p = AnalysisPipeline(W, combiner=combiner, scorer=scorer, allowable_labels=['l', 'r'], num_bootstrap_item_samples=bootstrap_samples, verbosity=1, classifier_predictions=classifier, max_K=max_k, anonymous_raters=True, procs=num_processors) p.save(path=p.path_for_saving(f'GTK/{combiner.__class__.__name__}_plus_{scorer.__class__.__name__}'), msg=f' Running GuessTheKarma experiment with {len(W)} items and {len(W.columns)} raters per item {bootstrap_samples} bootstrap itemsets {combiner.__class__.__name__} with {scorer.__class__.__name__}. ') (fig, ax) = plt.subplots() fig.set_size_inches(8.5, 10.5) pl = Plot(ax, p.expert_power_curve, classifier_scores=p.classifier_scores, y_axis_label='score', center_on=(prior.expert_power_curve.values[0] if (prior is not None) else None), name=f'GTK {type(combiner).__name__}_plus_{type(scorer).__name__}', legend_label='k raters', generate_pgf=True) pl.plot(include_classifiers=True, include_classifier_equivalences=True, include_droplines=True, include_expert_points='all', connect_expert_points=True, include_classifier_cis=True) pl.save(p.path_for_saving(f'GTK/{type(combiner).__name__}_plus_{type(scorer).__name__}'), fig=fig)<|docstring|>Run GuessTheKarma example with provided combiner and scorer. With GuessTheKarma data we have annotations for image pairs (items) from non-anonymous raters. In addition, each "correct" item is annotated as 'A' ('B' is therefore the incorrect answer). So to balance the dataset we randomly swap 'A' for 'B'. Parameters ---------- combiner : Combiner Combiner function scorer : Scorer Scoring function max_k : int Maximum number of raters to use when calculating survey power curve. Lower values dramatically speed up execution of the procedure. 
No default is set, but this value is typically equal to the average number of raters per item. max_items : int Maximum items to use from the dataset. Fewer items increases the speed of the procedure by results in loss of statistical power. No default is set. If this value is smaller than the number of items in the dataset then the function will only take the first max_items items from the dataset thereby ignoring some data. bootstrap_samples : int Number of samples to use when calculating survey equivalence. Like the number of samples in a t-test, more samples increases the statistical power, but each requires additional computational time. No default is set. num_processors : int Number of processors to use for parallel processing Notes ----- This function uses data collected by the GuessTheKarma Web game [1]_ to generate survey equivalence values. References ---------- .. [1] Glenski, M., Stoddard, G., Resnick, P., & Weninger, T. (2018). Guessthekarma: A game to assess social rating systems. Proceedings of the ACM on Human-Computer Interaction, 2(CSCW), 1-15.<|endoftext|>
e1425e79f443feda56a97841a78caa2c7c73597f0349ea3b1c126213bc6831a1
def setUp(self): '\n Set up method that will run before every Test\n ' self.new_quote = Quote(1, 'Debbie', 'its never that serious')
Set up method that will run before every Test
tests/test_quote.py
setUp
DebbieElabonga/blog
0
python
def setUp(self): '\n \n ' self.new_quote = Quote(1, 'Debbie', 'its never that serious')
def setUp(self): '\n \n ' self.new_quote = Quote(1, 'Debbie', 'its never that serious')<|docstring|>Set up method that will run before every Test<|endoftext|>
163719fbfababffa6b525692e6937f49ed2a604f0fa3ee521f08421b5289f37c
def __init__(self, mu=0.0): 'Create a posterior assuming the prior is :math:`\\mathcal{N}(\\mu, 1)`.\n \n - The prior is centered (:math:`\\mu=1`) by default, but parameter ``mu`` can be used to change this default.\n ' self._mu = float(mu) self.mu = float(mu) self._nu = 1.0 self.sigma = 1.0 self._nb_data = 0 self._sum_data = 0.0
Create a posterior assuming the prior is :math:`\mathcal{N}(\mu, 1)`. - The prior is centered (:math:`\mu=1`) by default, but parameter ``mu`` can be used to change this default.
SMPyBandits/Policies/Posterior/Gauss.py
__init__
SlyJabiru/SMPyBandits
309
python
def __init__(self, mu=0.0): 'Create a posterior assuming the prior is :math:`\\mathcal{N}(\\mu, 1)`.\n \n - The prior is centered (:math:`\\mu=1`) by default, but parameter ``mu`` can be used to change this default.\n ' self._mu = float(mu) self.mu = float(mu) self._nu = 1.0 self.sigma = 1.0 self._nb_data = 0 self._sum_data = 0.0
def __init__(self, mu=0.0): 'Create a posterior assuming the prior is :math:`\\mathcal{N}(\\mu, 1)`.\n \n - The prior is centered (:math:`\\mu=1`) by default, but parameter ``mu`` can be used to change this default.\n ' self._mu = float(mu) self.mu = float(mu) self._nu = 1.0 self.sigma = 1.0 self._nb_data = 0 self._sum_data = 0.0<|docstring|>Create a posterior assuming the prior is :math:`\mathcal{N}(\mu, 1)`. - The prior is centered (:math:`\mu=1`) by default, but parameter ``mu`` can be used to change this default.<|endoftext|>
7234e4ab46a5a3b8343055d80b6171ddbcc4ae5101a74429c462382b209b0b3f
def reset(self, mu=None): ' Reset the for parameters :math:`\\mu, \\sigma`, as when creating a new Gauss posterior.' if (mu is None): self.mu = self._mu self.sigma = self._nu
Reset the for parameters :math:`\mu, \sigma`, as when creating a new Gauss posterior.
SMPyBandits/Policies/Posterior/Gauss.py
reset
SlyJabiru/SMPyBandits
309
python
def reset(self, mu=None): ' Reset the for parameters :math:`\\mu, \\sigma`, as when creating a new Gauss posterior.' if (mu is None): self.mu = self._mu self.sigma = self._nu
def reset(self, mu=None): ' Reset the for parameters :math:`\\mu, \\sigma`, as when creating a new Gauss posterior.' if (mu is None): self.mu = self._mu self.sigma = self._nu<|docstring|>Reset the for parameters :math:`\mu, \sigma`, as when creating a new Gauss posterior.<|endoftext|>
8ebc3becedd87058b842584a8177abef407a9b02fd2919ab30e79d8a477bfcec
def sample(self): ' Get a random sample :math:`(x, \\sigma^2)` from the Gaussian posterior (using :func:`scipy.stats.invgamma` for the variance :math:`\\sigma^2` parameter and :func:`numpy.random.normal` for the mean :math:`x`).\n\n - Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.\n ' return normalvariate(loc=self.mu, scale=self.sigma)
Get a random sample :math:`(x, \sigma^2)` from the Gaussian posterior (using :func:`scipy.stats.invgamma` for the variance :math:`\sigma^2` parameter and :func:`numpy.random.normal` for the mean :math:`x`). - Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.
SMPyBandits/Policies/Posterior/Gauss.py
sample
SlyJabiru/SMPyBandits
309
python
def sample(self): ' Get a random sample :math:`(x, \\sigma^2)` from the Gaussian posterior (using :func:`scipy.stats.invgamma` for the variance :math:`\\sigma^2` parameter and :func:`numpy.random.normal` for the mean :math:`x`).\n\n - Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.\n ' return normalvariate(loc=self.mu, scale=self.sigma)
def sample(self): ' Get a random sample :math:`(x, \\sigma^2)` from the Gaussian posterior (using :func:`scipy.stats.invgamma` for the variance :math:`\\sigma^2` parameter and :func:`numpy.random.normal` for the mean :math:`x`).\n\n - Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.\n ' return normalvariate(loc=self.mu, scale=self.sigma)<|docstring|>Get a random sample :math:`(x, \sigma^2)` from the Gaussian posterior (using :func:`scipy.stats.invgamma` for the variance :math:`\sigma^2` parameter and :func:`numpy.random.normal` for the mean :math:`x`). - Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.<|endoftext|>
145dad549ec45afbfd9fc4958ef071e5c3fb75a882bce60c2e8fc9f6c43e5b43
def quantile(self, p): ' Return the p-quantile of the Gauss posterior.\n\n .. note:: It now works fine with :class:`Policies.BayesUCB` with Gauss posteriors, even if it is MUCH SLOWER than the Bernoulli posterior (:class:`Gamma`).\n ' quantile_on_x = nrdtrimn(p, 1, self.sigma) quantile_on_sigma2 = nrdtrisd(p, 1, self.mu) return (quantile_on_x * quantile_on_sigma2)
Return the p-quantile of the Gauss posterior. .. note:: It now works fine with :class:`Policies.BayesUCB` with Gauss posteriors, even if it is MUCH SLOWER than the Bernoulli posterior (:class:`Gamma`).
SMPyBandits/Policies/Posterior/Gauss.py
quantile
SlyJabiru/SMPyBandits
309
python
def quantile(self, p): ' Return the p-quantile of the Gauss posterior.\n\n .. note:: It now works fine with :class:`Policies.BayesUCB` with Gauss posteriors, even if it is MUCH SLOWER than the Bernoulli posterior (:class:`Gamma`).\n ' quantile_on_x = nrdtrimn(p, 1, self.sigma) quantile_on_sigma2 = nrdtrisd(p, 1, self.mu) return (quantile_on_x * quantile_on_sigma2)
def quantile(self, p): ' Return the p-quantile of the Gauss posterior.\n\n .. note:: It now works fine with :class:`Policies.BayesUCB` with Gauss posteriors, even if it is MUCH SLOWER than the Bernoulli posterior (:class:`Gamma`).\n ' quantile_on_x = nrdtrimn(p, 1, self.sigma) quantile_on_sigma2 = nrdtrisd(p, 1, self.mu) return (quantile_on_x * quantile_on_sigma2)<|docstring|>Return the p-quantile of the Gauss posterior. .. note:: It now works fine with :class:`Policies.BayesUCB` with Gauss posteriors, even if it is MUCH SLOWER than the Bernoulli posterior (:class:`Gamma`).<|endoftext|>
630913dc8e1b8541f8bc1f2549aee5c021a39e835572afcdcafbb09f6f6b159c
def mean(self): ' Compute the mean, :math:`\\mu` of the Gauss posterior (should be useless).' return self.mu
Compute the mean, :math:`\mu` of the Gauss posterior (should be useless).
SMPyBandits/Policies/Posterior/Gauss.py
mean
SlyJabiru/SMPyBandits
309
python
def mean(self): ' Compute the mean, :math:`\\mu` of the Gauss posterior (should be useless).' return self.mu
def mean(self): ' Compute the mean, :math:`\\mu` of the Gauss posterior (should be useless).' return self.mu<|docstring|>Compute the mean, :math:`\mu` of the Gauss posterior (should be useless).<|endoftext|>
3544e11a22561aa5a0903f520bf818cca7672e33f835ceede125dcb94c860ac8
def variance(self): ' Compute the variance, :math:`\\sigma`, of the Gauss posterior (should be useless).' return self.sigma
Compute the variance, :math:`\sigma`, of the Gauss posterior (should be useless).
SMPyBandits/Policies/Posterior/Gauss.py
variance
SlyJabiru/SMPyBandits
309
python
def variance(self): ' Compute the variance, :math:`\\sigma`, of the Gauss posterior (should be useless).' return self.sigma
def variance(self): ' Compute the variance, :math:`\\sigma`, of the Gauss posterior (should be useless).' return self.sigma<|docstring|>Compute the variance, :math:`\sigma`, of the Gauss posterior (should be useless).<|endoftext|>
9bc0ca7c2623266a6bd4157329c4bfdf6629d946d785c130acf25de9135850c5
def update(self, obs): 'Add an observation :math:`x` or a vector of observations, assumed to be drawn from an unknown normal distribution.\n ' self._nb_data += 1 self._sum_data += float(obs) (mu, sigma) = (self.mu, self.sigma) new_sigma = (1 / float(self._nb_data)) new_mu = (self._sum_data * new_sigma) (self.mu, self.sigma) = (new_mu, new_sigma)
Add an observation :math:`x` or a vector of observations, assumed to be drawn from an unknown normal distribution.
SMPyBandits/Policies/Posterior/Gauss.py
update
SlyJabiru/SMPyBandits
309
python
def update(self, obs): '\n ' self._nb_data += 1 self._sum_data += float(obs) (mu, sigma) = (self.mu, self.sigma) new_sigma = (1 / float(self._nb_data)) new_mu = (self._sum_data * new_sigma) (self.mu, self.sigma) = (new_mu, new_sigma)
def update(self, obs): '\n ' self._nb_data += 1 self._sum_data += float(obs) (mu, sigma) = (self.mu, self.sigma) new_sigma = (1 / float(self._nb_data)) new_mu = (self._sum_data * new_sigma) (self.mu, self.sigma) = (new_mu, new_sigma)<|docstring|>Add an observation :math:`x` or a vector of observations, assumed to be drawn from an unknown normal distribution.<|endoftext|>
cce8698de63f6b5c140d0e7d0627c84f4c9a830d1ebe6e1b7db59d4587932217
def forget(self, obs): 'Forget the last observation. Should work, but should also not be used...' raise NotImplementedError
Forget the last observation. Should work, but should also not be used...
SMPyBandits/Policies/Posterior/Gauss.py
forget
SlyJabiru/SMPyBandits
309
python
def forget(self, obs): raise NotImplementedError
def forget(self, obs): raise NotImplementedError<|docstring|>Forget the last observation. Should work, but should also not be used...<|endoftext|>
44ed811dbf0f678b2613e1ad57c54d387eeb246ab8e9e799daeebf06a352660e
def plot_confusion_matrix(matrix, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues, vmax=None, use_colorbar=True, y_label=True): '\n This function plots a confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n\n Args:\n matrix: confusion matrix as a numpy 2D matrix. Rows are ground-truth classes\n and columns the predicted classes. Number of rows and columns have to match\n classes: list of strings, which contain the corresponding class names for each row/column\n normalize: boolean indicating whether to perform row-wise normalization to sum 1\n title: string which will be used as title\n cmap: pyplot colormap, default: matplotlib.pyplot.cm.Blues\n vmax: float, specifies the value that corresponds to the largest value of the colormap.\n If None, the maximum value in *matrix* will be used. Default: None\n use_colorbar: boolean indicating if a colorbar should be plotted\n y_label: boolean indicating whether class names should be plotted on the y-axis as well\n\n Returns a reference to the figure\n ' assert (matrix.shape[0] == matrix.shape[1]) fig = plt.figure(figsize=([(3 + (0.5 * len(classes)))] * 2)) if normalize: matrix = (matrix.astype(np.double) / (matrix.sum(axis=1, keepdims=True) + 1e-07)) plt.imshow(matrix, interpolation='nearest', cmap=cmap, vmax=vmax) plt.title(title) if use_colorbar: plt.colorbar(fraction=0.046, pad=0.04, ticks=[0.0, 0.25, 0.5, 0.75, 1.0]).set_ticklabels(['0%', '25%', '50%', '75%', '100%']) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) if y_label: plt.yticks(tick_marks, classes) else: plt.yticks(tick_marks, ['' for cn in classes]) for (i, j) in np.ndindex(matrix.shape): plt.text(j, i, '{:.0f}%'.format((matrix[(i, j)] * 100)), horizontalalignment='center', verticalalignment='center', color=('white' if (matrix[(i, j)] > 0.5) else 'black'), fontsize='x-small') if y_label: plt.ylabel('Ground-truth class') plt.xlabel('Predicted class') plt.tight_layout() return fig
This function plots a confusion matrix. Normalization can be applied by setting `normalize=True`. Args: matrix: confusion matrix as a numpy 2D matrix. Rows are ground-truth classes and columns the predicted classes. Number of rows and columns have to match classes: list of strings, which contain the corresponding class names for each row/column normalize: boolean indicating whether to perform row-wise normalization to sum 1 title: string which will be used as title cmap: pyplot colormap, default: matplotlib.pyplot.cm.Blues vmax: float, specifies the value that corresponds to the largest value of the colormap. If None, the maximum value in *matrix* will be used. Default: None use_colorbar: boolean indicating if a colorbar should be plotted y_label: boolean indicating whether class names should be plotted on the y-axis as well Returns a reference to the figure
visualization/plot_utils.py
plot_confusion_matrix
shelviaandi/CameraTraps
0
python
def plot_confusion_matrix(matrix, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues, vmax=None, use_colorbar=True, y_label=True): '\n This function plots a confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n\n Args:\n matrix: confusion matrix as a numpy 2D matrix. Rows are ground-truth classes\n and columns the predicted classes. Number of rows and columns have to match\n classes: list of strings, which contain the corresponding class names for each row/column\n normalize: boolean indicating whether to perform row-wise normalization to sum 1\n title: string which will be used as title\n cmap: pyplot colormap, default: matplotlib.pyplot.cm.Blues\n vmax: float, specifies the value that corresponds to the largest value of the colormap.\n If None, the maximum value in *matrix* will be used. Default: None\n use_colorbar: boolean indicating if a colorbar should be plotted\n y_label: boolean indicating whether class names should be plotted on the y-axis as well\n\n Returns a reference to the figure\n ' assert (matrix.shape[0] == matrix.shape[1]) fig = plt.figure(figsize=([(3 + (0.5 * len(classes)))] * 2)) if normalize: matrix = (matrix.astype(np.double) / (matrix.sum(axis=1, keepdims=True) + 1e-07)) plt.imshow(matrix, interpolation='nearest', cmap=cmap, vmax=vmax) plt.title(title) if use_colorbar: plt.colorbar(fraction=0.046, pad=0.04, ticks=[0.0, 0.25, 0.5, 0.75, 1.0]).set_ticklabels(['0%', '25%', '50%', '75%', '100%']) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) if y_label: plt.yticks(tick_marks, classes) else: plt.yticks(tick_marks, [ for cn in classes]) for (i, j) in np.ndindex(matrix.shape): plt.text(j, i, '{:.0f}%'.format((matrix[(i, j)] * 100)), horizontalalignment='center', verticalalignment='center', color=('white' if (matrix[(i, j)] > 0.5) else 'black'), fontsize='x-small') if y_label: plt.ylabel('Ground-truth class') plt.xlabel('Predicted class') plt.tight_layout() return fig
def plot_confusion_matrix(matrix, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues, vmax=None, use_colorbar=True, y_label=True): '\n This function plots a confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n\n Args:\n matrix: confusion matrix as a numpy 2D matrix. Rows are ground-truth classes\n and columns the predicted classes. Number of rows and columns have to match\n classes: list of strings, which contain the corresponding class names for each row/column\n normalize: boolean indicating whether to perform row-wise normalization to sum 1\n title: string which will be used as title\n cmap: pyplot colormap, default: matplotlib.pyplot.cm.Blues\n vmax: float, specifies the value that corresponds to the largest value of the colormap.\n If None, the maximum value in *matrix* will be used. Default: None\n use_colorbar: boolean indicating if a colorbar should be plotted\n y_label: boolean indicating whether class names should be plotted on the y-axis as well\n\n Returns a reference to the figure\n ' assert (matrix.shape[0] == matrix.shape[1]) fig = plt.figure(figsize=([(3 + (0.5 * len(classes)))] * 2)) if normalize: matrix = (matrix.astype(np.double) / (matrix.sum(axis=1, keepdims=True) + 1e-07)) plt.imshow(matrix, interpolation='nearest', cmap=cmap, vmax=vmax) plt.title(title) if use_colorbar: plt.colorbar(fraction=0.046, pad=0.04, ticks=[0.0, 0.25, 0.5, 0.75, 1.0]).set_ticklabels(['0%', '25%', '50%', '75%', '100%']) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) if y_label: plt.yticks(tick_marks, classes) else: plt.yticks(tick_marks, [ for cn in classes]) for (i, j) in np.ndindex(matrix.shape): plt.text(j, i, '{:.0f}%'.format((matrix[(i, j)] * 100)), horizontalalignment='center', verticalalignment='center', color=('white' if (matrix[(i, j)] > 0.5) else 'black'), fontsize='x-small') if y_label: plt.ylabel('Ground-truth class') plt.xlabel('Predicted class') plt.tight_layout() return 
fig<|docstring|>This function plots a confusion matrix. Normalization can be applied by setting `normalize=True`. Args: matrix: confusion matrix as a numpy 2D matrix. Rows are ground-truth classes and columns the predicted classes. Number of rows and columns have to match classes: list of strings, which contain the corresponding class names for each row/column normalize: boolean indicating whether to perform row-wise normalization to sum 1 title: string which will be used as title cmap: pyplot colormap, default: matplotlib.pyplot.cm.Blues vmax: float, specifies the value that corresponds to the largest value of the colormap. If None, the maximum value in *matrix* will be used. Default: None use_colorbar: boolean indicating if a colorbar should be plotted y_label: boolean indicating whether class names should be plotted on the y-axis as well Returns a reference to the figure<|endoftext|>
37a32f88b8f3171aa2c14c7a364f403ac9bd1e58fbc7bafd8274ece711b71c68
def plot_precision_recall_curve(precisions, recalls, title='Precision/Recall curve'): '\n Plots the precision recall curve given lists of (ordered) precision\n and recall values\n Args:\n precisions: list of floats, the precision for the corresponding recall values.\n Should have same length as *recalls*.\n recalls: list of floats, the recall values for corresponding precision values.\n Should have same length as *precisions*.\n title: string that will be as as plot title\n\n Returns a reference to the figure\n ' step_kwargs = {'step': 'post'} fig = plt.figure() plt.title(title) plt.step(recalls, precisions, color='b', alpha=0.2, where='post') plt.fill_between(recalls, precisions, alpha=0.2, color='b', **step_kwargs) plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.05]) return fig
Plots the precision recall curve given lists of (ordered) precision and recall values Args: precisions: list of floats, the precision for the corresponding recall values. Should have same length as *recalls*. recalls: list of floats, the recall values for corresponding precision values. Should have same length as *precisions*. title: string that will be as as plot title Returns a reference to the figure
visualization/plot_utils.py
plot_precision_recall_curve
shelviaandi/CameraTraps
0
python
def plot_precision_recall_curve(precisions, recalls, title='Precision/Recall curve'): '\n Plots the precision recall curve given lists of (ordered) precision\n and recall values\n Args:\n precisions: list of floats, the precision for the corresponding recall values.\n Should have same length as *recalls*.\n recalls: list of floats, the recall values for corresponding precision values.\n Should have same length as *precisions*.\n title: string that will be as as plot title\n\n Returns a reference to the figure\n ' step_kwargs = {'step': 'post'} fig = plt.figure() plt.title(title) plt.step(recalls, precisions, color='b', alpha=0.2, where='post') plt.fill_between(recalls, precisions, alpha=0.2, color='b', **step_kwargs) plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.05]) return fig
def plot_precision_recall_curve(precisions, recalls, title='Precision/Recall curve'): '\n Plots the precision recall curve given lists of (ordered) precision\n and recall values\n Args:\n precisions: list of floats, the precision for the corresponding recall values.\n Should have same length as *recalls*.\n recalls: list of floats, the recall values for corresponding precision values.\n Should have same length as *precisions*.\n title: string that will be as as plot title\n\n Returns a reference to the figure\n ' step_kwargs = {'step': 'post'} fig = plt.figure() plt.title(title) plt.step(recalls, precisions, color='b', alpha=0.2, where='post') plt.fill_between(recalls, precisions, alpha=0.2, color='b', **step_kwargs) plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.05]) return fig<|docstring|>Plots the precision recall curve given lists of (ordered) precision and recall values Args: precisions: list of floats, the precision for the corresponding recall values. Should have same length as *recalls*. recalls: list of floats, the recall values for corresponding precision values. Should have same length as *precisions*. title: string that will be as as plot title Returns a reference to the figure<|endoftext|>
c4c70a09201dee196c0932176f5a78d2750fe205ec32c636309b766c6b20742b
def plot_stacked_bar_chart(data, series_labels, col_labels=None, x_label=None, y_label=None, log_scale=False): '\n For plotting e.g. species distribution across locations.\n Reference: https://stackoverflow.com/questions/44309507/stacked-bar-plot-using-matplotlib\n Args:\n data: a 2-dimensional numpy array or nested list containing data for each series (species)\n in rows (1st dimension) across locations (columns, 2nd dimension)\n\n Returns:\n the plot that can then be saved as a png.\n ' fig = plt.figure() ax = plt.subplot(111) data = np.array(data) (num_series, num_columns) = data.shape ind = list(range(num_columns)) colors = cm.rainbow(np.linspace(0, 1, num_series)) cumulative_size = np.zeros(num_columns) for (i, row_data) in enumerate(data): ax.bar(ind, row_data, bottom=cumulative_size, label=series_labels[i], color=colors[i]) cumulative_size += row_data if (col_labels and (len(col_labels) < 25)): ax.set_xticks(ind) ax.set_xticklabels(col_labels, rotation=90) elif col_labels: ax.set_xticks(list(range(0, len(col_labels), 20))) ax.set_xticklabels(col_labels, rotation=90) if x_label: ax.set_xlabel(x_label) if y_label: ax.set_ylabel(y_label) if log_scale: ax.set_yscale('log') box = ax.get_position() ax.set_position([box.x0, box.y0, (box.width * 0.8), box.height]) ax.legend(loc='center left', bbox_to_anchor=(0.99, 0.5), frameon=False) return fig
For plotting e.g. species distribution across locations. Reference: https://stackoverflow.com/questions/44309507/stacked-bar-plot-using-matplotlib Args: data: a 2-dimensional numpy array or nested list containing data for each series (species) in rows (1st dimension) across locations (columns, 2nd dimension) Returns: the plot that can then be saved as a png.
visualization/plot_utils.py
plot_stacked_bar_chart
shelviaandi/CameraTraps
0
python
def plot_stacked_bar_chart(data, series_labels, col_labels=None, x_label=None, y_label=None, log_scale=False): '\n For plotting e.g. species distribution across locations.\n Reference: https://stackoverflow.com/questions/44309507/stacked-bar-plot-using-matplotlib\n Args:\n data: a 2-dimensional numpy array or nested list containing data for each series (species)\n in rows (1st dimension) across locations (columns, 2nd dimension)\n\n Returns:\n the plot that can then be saved as a png.\n ' fig = plt.figure() ax = plt.subplot(111) data = np.array(data) (num_series, num_columns) = data.shape ind = list(range(num_columns)) colors = cm.rainbow(np.linspace(0, 1, num_series)) cumulative_size = np.zeros(num_columns) for (i, row_data) in enumerate(data): ax.bar(ind, row_data, bottom=cumulative_size, label=series_labels[i], color=colors[i]) cumulative_size += row_data if (col_labels and (len(col_labels) < 25)): ax.set_xticks(ind) ax.set_xticklabels(col_labels, rotation=90) elif col_labels: ax.set_xticks(list(range(0, len(col_labels), 20))) ax.set_xticklabels(col_labels, rotation=90) if x_label: ax.set_xlabel(x_label) if y_label: ax.set_ylabel(y_label) if log_scale: ax.set_yscale('log') box = ax.get_position() ax.set_position([box.x0, box.y0, (box.width * 0.8), box.height]) ax.legend(loc='center left', bbox_to_anchor=(0.99, 0.5), frameon=False) return fig
def plot_stacked_bar_chart(data, series_labels, col_labels=None, x_label=None, y_label=None, log_scale=False): '\n For plotting e.g. species distribution across locations.\n Reference: https://stackoverflow.com/questions/44309507/stacked-bar-plot-using-matplotlib\n Args:\n data: a 2-dimensional numpy array or nested list containing data for each series (species)\n in rows (1st dimension) across locations (columns, 2nd dimension)\n\n Returns:\n the plot that can then be saved as a png.\n ' fig = plt.figure() ax = plt.subplot(111) data = np.array(data) (num_series, num_columns) = data.shape ind = list(range(num_columns)) colors = cm.rainbow(np.linspace(0, 1, num_series)) cumulative_size = np.zeros(num_columns) for (i, row_data) in enumerate(data): ax.bar(ind, row_data, bottom=cumulative_size, label=series_labels[i], color=colors[i]) cumulative_size += row_data if (col_labels and (len(col_labels) < 25)): ax.set_xticks(ind) ax.set_xticklabels(col_labels, rotation=90) elif col_labels: ax.set_xticks(list(range(0, len(col_labels), 20))) ax.set_xticklabels(col_labels, rotation=90) if x_label: ax.set_xlabel(x_label) if y_label: ax.set_ylabel(y_label) if log_scale: ax.set_yscale('log') box = ax.get_position() ax.set_position([box.x0, box.y0, (box.width * 0.8), box.height]) ax.legend(loc='center left', bbox_to_anchor=(0.99, 0.5), frameon=False) return fig<|docstring|>For plotting e.g. species distribution across locations. Reference: https://stackoverflow.com/questions/44309507/stacked-bar-plot-using-matplotlib Args: data: a 2-dimensional numpy array or nested list containing data for each series (species) in rows (1st dimension) across locations (columns, 2nd dimension) Returns: the plot that can then be saved as a png.<|endoftext|>
a40b615134280436fcab84e3491066398ef73d2ab10a3614e9a9b989e24c06a5
def fine_tune_model(model, optimizer, batch_size, epochs, freeze_num): '\n discription: 对指定预训练模型进行fine-tune,并保存为.hdf5格式\n \n MODEL:传入的模型,VGG16, ResNet50, ...\n\n optimizer: fine-tune all layers 的优化器, first part默认用adadelta\n batch_size: 每一批的尺寸,建议32/64/128\n epochs: fine-tune all layers的代数\n freeze_num: first part冻结卷积层的数量\n ' for layer in model.layers[:freeze_num]: layer.trainable = False model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=3, shuffle=True, verbose=1, validation_data=(x_valid, y_valid)) print('Finish step_1') for layer in model.layers[freeze_num:]: layer.trainable = True rc = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, mode='min') model_name = (model.name + '.hdf5') mc = ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=1, mode='min') el = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, restore_best_weights=True) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) history_fit = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=1, validation_data=(x_valid, y_valid), callbacks=[mc, rc, el]) print('Finish fine-tune') return history_fit
discription: 对指定预训练模型进行fine-tune,并保存为.hdf5格式 MODEL:传入的模型,VGG16, ResNet50, ... optimizer: fine-tune all layers 的优化器, first part默认用adadelta batch_size: 每一批的尺寸,建议32/64/128 epochs: fine-tune all layers的代数 freeze_num: first part冻结卷积层的数量
bird/web/vgg16_train.py
fine_tune_model
birds-oucteam9/birds-team9
0
python
def fine_tune_model(model, optimizer, batch_size, epochs, freeze_num): '\n discription: 对指定预训练模型进行fine-tune,并保存为.hdf5格式\n \n MODEL:传入的模型,VGG16, ResNet50, ...\n\n optimizer: fine-tune all layers 的优化器, first part默认用adadelta\n batch_size: 每一批的尺寸,建议32/64/128\n epochs: fine-tune all layers的代数\n freeze_num: first part冻结卷积层的数量\n ' for layer in model.layers[:freeze_num]: layer.trainable = False model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=3, shuffle=True, verbose=1, validation_data=(x_valid, y_valid)) print('Finish step_1') for layer in model.layers[freeze_num:]: layer.trainable = True rc = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, mode='min') model_name = (model.name + '.hdf5') mc = ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=1, mode='min') el = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, restore_best_weights=True) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) history_fit = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=1, validation_data=(x_valid, y_valid), callbacks=[mc, rc, el]) print('Finish fine-tune') return history_fit
def fine_tune_model(model, optimizer, batch_size, epochs, freeze_num): '\n discription: 对指定预训练模型进行fine-tune,并保存为.hdf5格式\n \n MODEL:传入的模型,VGG16, ResNet50, ...\n\n optimizer: fine-tune all layers 的优化器, first part默认用adadelta\n batch_size: 每一批的尺寸,建议32/64/128\n epochs: fine-tune all layers的代数\n freeze_num: first part冻结卷积层的数量\n ' for layer in model.layers[:freeze_num]: layer.trainable = False model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=3, shuffle=True, verbose=1, validation_data=(x_valid, y_valid)) print('Finish step_1') for layer in model.layers[freeze_num:]: layer.trainable = True rc = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, mode='min') model_name = (model.name + '.hdf5') mc = ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=1, mode='min') el = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, restore_best_weights=True) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) history_fit = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=1, validation_data=(x_valid, y_valid), callbacks=[mc, rc, el]) print('Finish fine-tune') return history_fit<|docstring|>discription: 对指定预训练模型进行fine-tune,并保存为.hdf5格式 MODEL:传入的模型,VGG16, ResNet50, ... optimizer: fine-tune all layers 的优化器, first part默认用adadelta batch_size: 每一批的尺寸,建议32/64/128 epochs: fine-tune all layers的代数 freeze_num: first part冻结卷积层的数量<|endoftext|>
2070534835f9d4d53b08d203c0a3b073286d0719515ef91b2b5f1ff717334d46
def se_block(input_feature, ratio=8): 'Contains the implementation of Squeeze-and-Excitation(SE) block.\n\tAs described in https://arxiv.org/abs/1709.01507.\n\t' channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1)) channel = input_feature._keras_shape[channel_axis] se_feature = GlobalAveragePooling2D()(input_feature) se_feature = Reshape((1, 1, channel))(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, channel)) se_feature = Dense((channel // ratio), activation='relu', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, (channel // ratio))) se_feature = Dense(channel, activation='sigmoid', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, channel)) if (K.image_data_format() == 'channels_first'): se_feature = Permute((3, 1, 2))(se_feature) se_feature = multiply([input_feature, se_feature]) return se_feature
Contains the implementation of Squeeze-and-Excitation(SE) block. As described in https://arxiv.org/abs/1709.01507.
bird/web/vgg16_train.py
se_block
birds-oucteam9/birds-team9
0
python
def se_block(input_feature, ratio=8): 'Contains the implementation of Squeeze-and-Excitation(SE) block.\n\tAs described in https://arxiv.org/abs/1709.01507.\n\t' channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1)) channel = input_feature._keras_shape[channel_axis] se_feature = GlobalAveragePooling2D()(input_feature) se_feature = Reshape((1, 1, channel))(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, channel)) se_feature = Dense((channel // ratio), activation='relu', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, (channel // ratio))) se_feature = Dense(channel, activation='sigmoid', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, channel)) if (K.image_data_format() == 'channels_first'): se_feature = Permute((3, 1, 2))(se_feature) se_feature = multiply([input_feature, se_feature]) return se_feature
def se_block(input_feature, ratio=8): 'Contains the implementation of Squeeze-and-Excitation(SE) block.\n\tAs described in https://arxiv.org/abs/1709.01507.\n\t' channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1)) channel = input_feature._keras_shape[channel_axis] se_feature = GlobalAveragePooling2D()(input_feature) se_feature = Reshape((1, 1, channel))(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, channel)) se_feature = Dense((channel // ratio), activation='relu', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, (channel // ratio))) se_feature = Dense(channel, activation='sigmoid', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')(se_feature) assert (se_feature._keras_shape[1:] == (1, 1, channel)) if (K.image_data_format() == 'channels_first'): se_feature = Permute((3, 1, 2))(se_feature) se_feature = multiply([input_feature, se_feature]) return se_feature<|docstring|>Contains the implementation of Squeeze-and-Excitation(SE) block. As described in https://arxiv.org/abs/1709.01507.<|endoftext|>
7ece6f1128bf24e6cdb0c2f00c73d472517196c05582a556bf2502148165b6a9
def cbam_block(cbam_feature, ratio=8): 'Contains the implementation of Convolutional Block Attention Module(CBAM) block.\n\tAs described in https://arxiv.org/abs/1807.06521.\n\t' cbam_feature = channel_attention(cbam_feature, ratio) cbam_feature = spatial_attention(cbam_feature) return cbam_feature
Contains the implementation of Convolutional Block Attention Module(CBAM) block. As described in https://arxiv.org/abs/1807.06521.
bird/web/vgg16_train.py
cbam_block
birds-oucteam9/birds-team9
0
python
def cbam_block(cbam_feature, ratio=8): 'Contains the implementation of Convolutional Block Attention Module(CBAM) block.\n\tAs described in https://arxiv.org/abs/1807.06521.\n\t' cbam_feature = channel_attention(cbam_feature, ratio) cbam_feature = spatial_attention(cbam_feature) return cbam_feature
def cbam_block(cbam_feature, ratio=8): 'Contains the implementation of Convolutional Block Attention Module(CBAM) block.\n\tAs described in https://arxiv.org/abs/1807.06521.\n\t' cbam_feature = channel_attention(cbam_feature, ratio) cbam_feature = spatial_attention(cbam_feature) return cbam_feature<|docstring|>Contains the implementation of Convolutional Block Attention Module(CBAM) block. As described in https://arxiv.org/abs/1807.06521.<|endoftext|>
49efba06185d1a8893a0f711bd2ccb8dfad8efec5d8144840571fe5783e48d49
def _linthompsamp_score(self, context): 'Thompson Sampling' action_ids = list(six.viewkeys(context)) context_array = np.asarray([context[action_id] for action_id in action_ids]) model = self._model_storage.get_model() B = model['B'] mu_hat = model['mu_hat'] v = (self.R * np.sqrt((((24 / self.epsilon) * self.context_dimension) * np.log((1 / self.delta))))) mu_tilde = self.random_state.multivariate_normal(mu_hat.flat, ((v ** 2) * np.linalg.inv(B)))[(..., np.newaxis)] estimated_reward_array = context_array.dot(mu_hat) score_array = context_array.dot(mu_tilde) estimated_reward_dict = {} uncertainty_dict = {} score_dict = {} for (action_id, estimated_reward, score) in zip(action_ids, estimated_reward_array, score_array): estimated_reward_dict[action_id] = float(estimated_reward) score_dict[action_id] = float(score) uncertainty_dict[action_id] = float((score - estimated_reward)) return (estimated_reward_dict, uncertainty_dict, score_dict)
Thompson Sampling
striatum/bandit/linthompsamp.py
_linthompsamp_score
zhengcaoscu/bandit
108
python
def _linthompsamp_score(self, context): action_ids = list(six.viewkeys(context)) context_array = np.asarray([context[action_id] for action_id in action_ids]) model = self._model_storage.get_model() B = model['B'] mu_hat = model['mu_hat'] v = (self.R * np.sqrt((((24 / self.epsilon) * self.context_dimension) * np.log((1 / self.delta))))) mu_tilde = self.random_state.multivariate_normal(mu_hat.flat, ((v ** 2) * np.linalg.inv(B)))[(..., np.newaxis)] estimated_reward_array = context_array.dot(mu_hat) score_array = context_array.dot(mu_tilde) estimated_reward_dict = {} uncertainty_dict = {} score_dict = {} for (action_id, estimated_reward, score) in zip(action_ids, estimated_reward_array, score_array): estimated_reward_dict[action_id] = float(estimated_reward) score_dict[action_id] = float(score) uncertainty_dict[action_id] = float((score - estimated_reward)) return (estimated_reward_dict, uncertainty_dict, score_dict)
def _linthompsamp_score(self, context): action_ids = list(six.viewkeys(context)) context_array = np.asarray([context[action_id] for action_id in action_ids]) model = self._model_storage.get_model() B = model['B'] mu_hat = model['mu_hat'] v = (self.R * np.sqrt((((24 / self.epsilon) * self.context_dimension) * np.log((1 / self.delta))))) mu_tilde = self.random_state.multivariate_normal(mu_hat.flat, ((v ** 2) * np.linalg.inv(B)))[(..., np.newaxis)] estimated_reward_array = context_array.dot(mu_hat) score_array = context_array.dot(mu_tilde) estimated_reward_dict = {} uncertainty_dict = {} score_dict = {} for (action_id, estimated_reward, score) in zip(action_ids, estimated_reward_array, score_array): estimated_reward_dict[action_id] = float(estimated_reward) score_dict[action_id] = float(score) uncertainty_dict[action_id] = float((score - estimated_reward)) return (estimated_reward_dict, uncertainty_dict, score_dict)<|docstring|>Thompson Sampling<|endoftext|>
4d4cf392e9cfe260404a41bcc70e87c152e77c4a5e7b0c1f5a26a6ed314ca47d
def get_action(self, context, n_actions=None): 'Return the action to perform\n\n Parameters\n ----------\n context : dictionary\n Contexts {action_id: context} of different actions.\n\n n_actions: int (default: None)\n Number of actions wanted to recommend users. If None, only return\n one action. If -1, get all actions.\n\n Returns\n -------\n history_id : int\n The history id of the action.\n\n recommendations : list of dict\n Each dict contains\n {Action object, estimated_reward, uncertainty}.\n ' if (self._action_storage.count() == 0): return self._get_action_with_empty_action_storage(context, n_actions) if (not isinstance(context, dict)): raise ValueError('LinThompSamp requires context dict for all actions!') if (n_actions == (- 1)): n_actions = self._action_storage.count() (estimated_reward, uncertainty, score) = self._linthompsamp_score(context) if (n_actions is None): recommendation_id = max(score, key=score.get) recommendations = self._recommendation_cls(action=self._action_storage.get(recommendation_id), estimated_reward=estimated_reward[recommendation_id], uncertainty=uncertainty[recommendation_id], score=score[recommendation_id]) else: recommendation_ids = sorted(score, key=score.get, reverse=True)[:n_actions] recommendations = [] for action_id in recommendation_ids: recommendations.append(self._recommendation_cls(action=self._action_storage.get(action_id), estimated_reward=estimated_reward[action_id], uncertainty=uncertainty[action_id], score=score[action_id])) history_id = self._history_storage.add_history(context, recommendations) return (history_id, recommendations)
Return the action to perform Parameters ---------- context : dictionary Contexts {action_id: context} of different actions. n_actions: int (default: None) Number of actions wanted to recommend users. If None, only return one action. If -1, get all actions. Returns ------- history_id : int The history id of the action. recommendations : list of dict Each dict contains {Action object, estimated_reward, uncertainty}.
striatum/bandit/linthompsamp.py
get_action
zhengcaoscu/bandit
108
python
def get_action(self, context, n_actions=None): 'Return the action to perform\n\n Parameters\n ----------\n context : dictionary\n Contexts {action_id: context} of different actions.\n\n n_actions: int (default: None)\n Number of actions wanted to recommend users. If None, only return\n one action. If -1, get all actions.\n\n Returns\n -------\n history_id : int\n The history id of the action.\n\n recommendations : list of dict\n Each dict contains\n {Action object, estimated_reward, uncertainty}.\n ' if (self._action_storage.count() == 0): return self._get_action_with_empty_action_storage(context, n_actions) if (not isinstance(context, dict)): raise ValueError('LinThompSamp requires context dict for all actions!') if (n_actions == (- 1)): n_actions = self._action_storage.count() (estimated_reward, uncertainty, score) = self._linthompsamp_score(context) if (n_actions is None): recommendation_id = max(score, key=score.get) recommendations = self._recommendation_cls(action=self._action_storage.get(recommendation_id), estimated_reward=estimated_reward[recommendation_id], uncertainty=uncertainty[recommendation_id], score=score[recommendation_id]) else: recommendation_ids = sorted(score, key=score.get, reverse=True)[:n_actions] recommendations = [] for action_id in recommendation_ids: recommendations.append(self._recommendation_cls(action=self._action_storage.get(action_id), estimated_reward=estimated_reward[action_id], uncertainty=uncertainty[action_id], score=score[action_id])) history_id = self._history_storage.add_history(context, recommendations) return (history_id, recommendations)
def get_action(self, context, n_actions=None): 'Return the action to perform\n\n Parameters\n ----------\n context : dictionary\n Contexts {action_id: context} of different actions.\n\n n_actions: int (default: None)\n Number of actions wanted to recommend users. If None, only return\n one action. If -1, get all actions.\n\n Returns\n -------\n history_id : int\n The history id of the action.\n\n recommendations : list of dict\n Each dict contains\n {Action object, estimated_reward, uncertainty}.\n ' if (self._action_storage.count() == 0): return self._get_action_with_empty_action_storage(context, n_actions) if (not isinstance(context, dict)): raise ValueError('LinThompSamp requires context dict for all actions!') if (n_actions == (- 1)): n_actions = self._action_storage.count() (estimated_reward, uncertainty, score) = self._linthompsamp_score(context) if (n_actions is None): recommendation_id = max(score, key=score.get) recommendations = self._recommendation_cls(action=self._action_storage.get(recommendation_id), estimated_reward=estimated_reward[recommendation_id], uncertainty=uncertainty[recommendation_id], score=score[recommendation_id]) else: recommendation_ids = sorted(score, key=score.get, reverse=True)[:n_actions] recommendations = [] for action_id in recommendation_ids: recommendations.append(self._recommendation_cls(action=self._action_storage.get(action_id), estimated_reward=estimated_reward[action_id], uncertainty=uncertainty[action_id], score=score[action_id])) history_id = self._history_storage.add_history(context, recommendations) return (history_id, recommendations)<|docstring|>Return the action to perform Parameters ---------- context : dictionary Contexts {action_id: context} of different actions. n_actions: int (default: None) Number of actions wanted to recommend users. If None, only return one action. If -1, get all actions. Returns ------- history_id : int The history id of the action. 
recommendations : list of dict Each dict contains {Action object, estimated_reward, uncertainty}.<|endoftext|>
aac12137bd252a13b315332038edbfb48e742766b19e547109c227c6575dcb17
def reward(self, history_id, rewards): 'Reward the previous action with reward.\n\n Parameters\n ----------\n history_id : int\n The history id of the action to reward.\n\n rewards : dictionary\n The dictionary {action_id, reward}, where reward is a float.\n ' context = self._history_storage.get_unrewarded_history(history_id).context model = self._model_storage.get_model() B = model['B'] f = model['f'] for (action_id, reward) in six.viewitems(rewards): context_t = np.reshape(context[action_id], ((- 1), 1)) B += context_t.dot(context_t.T) f += (reward * context_t) mu_hat = np.linalg.inv(B).dot(f) self._model_storage.save_model({'B': B, 'mu_hat': mu_hat, 'f': f}) self._history_storage.add_reward(history_id, rewards)
Reward the previous action with reward. Parameters ---------- history_id : int The history id of the action to reward. rewards : dictionary The dictionary {action_id, reward}, where reward is a float.
striatum/bandit/linthompsamp.py
reward
zhengcaoscu/bandit
108
python
def reward(self, history_id, rewards): 'Reward the previous action with reward.\n\n Parameters\n ----------\n history_id : int\n The history id of the action to reward.\n\n rewards : dictionary\n The dictionary {action_id, reward}, where reward is a float.\n ' context = self._history_storage.get_unrewarded_history(history_id).context model = self._model_storage.get_model() B = model['B'] f = model['f'] for (action_id, reward) in six.viewitems(rewards): context_t = np.reshape(context[action_id], ((- 1), 1)) B += context_t.dot(context_t.T) f += (reward * context_t) mu_hat = np.linalg.inv(B).dot(f) self._model_storage.save_model({'B': B, 'mu_hat': mu_hat, 'f': f}) self._history_storage.add_reward(history_id, rewards)
def reward(self, history_id, rewards): 'Reward the previous action with reward.\n\n Parameters\n ----------\n history_id : int\n The history id of the action to reward.\n\n rewards : dictionary\n The dictionary {action_id, reward}, where reward is a float.\n ' context = self._history_storage.get_unrewarded_history(history_id).context model = self._model_storage.get_model() B = model['B'] f = model['f'] for (action_id, reward) in six.viewitems(rewards): context_t = np.reshape(context[action_id], ((- 1), 1)) B += context_t.dot(context_t.T) f += (reward * context_t) mu_hat = np.linalg.inv(B).dot(f) self._model_storage.save_model({'B': B, 'mu_hat': mu_hat, 'f': f}) self._history_storage.add_reward(history_id, rewards)<|docstring|>Reward the previous action with reward. Parameters ---------- history_id : int The history id of the action to reward. rewards : dictionary The dictionary {action_id, reward}, where reward is a float.<|endoftext|>
50fa13687ebfa19044f724c17f977a8fd8c89b9cca873bdc76c9d360082d12a4
def add_action(self, actions):
    """Register new actions with the policy (if needed).

    Parameters
    ----------
    actions : iterable
        A list of Action oBjects for recommendation
    """
    # Persisting the new actions is fully delegated to the action storage.
    self._action_storage.add(actions)
Add new actions (if needed). Parameters ---------- actions : iterable A list of Action oBjects for recommendation
striatum/bandit/linthompsamp.py
add_action
zhengcaoscu/bandit
108
python
def add_action(self, actions): ' Add new actions (if needed).\n\n Parameters\n ----------\n actions : iterable\n A list of Action oBjects for recommendation\n ' self._action_storage.add(actions)
def add_action(self, actions): ' Add new actions (if needed).\n\n Parameters\n ----------\n actions : iterable\n A list of Action oBjects for recommendation\n ' self._action_storage.add(actions)<|docstring|>Add new actions (if needed). Parameters ---------- actions : iterable A list of Action oBjects for recommendation<|endoftext|>
768632a1d3f36c02e22020c84a0ee72effe7bdc804c2b3338bbf273496f6ede9
def remove_action(self, action_id):
    """Forget a single action.

    Parameters
    ----------
    action_id : int
        The id of the action to remove.
    """
    # The action storage owns the action set; simply forward the removal.
    self._action_storage.remove(action_id)
Remove action by id. Parameters ---------- action_id : int The id of the action to remove.
striatum/bandit/linthompsamp.py
remove_action
zhengcaoscu/bandit
108
python
def remove_action(self, action_id): 'Remove action by id.\n\n Parameters\n ----------\n action_id : int\n The id of the action to remove.\n ' self._action_storage.remove(action_id)
def remove_action(self, action_id): 'Remove action by id.\n\n Parameters\n ----------\n action_id : int\n The id of the action to remove.\n ' self._action_storage.remove(action_id)<|docstring|>Remove action by id. Parameters ---------- action_id : int The id of the action to remove.<|endoftext|>
fbf51a5f2be4a147f84fa827805dd0533153bd5686a2b4616722d73c49ad9fcb
def main():
    """Find the best model to fit the dataset and save it into file"""
    search = new_grid_search()
    run_grid_search(search)
    save_search_results(search)
Find the best model to fit the dataset and save it into file
src/train.py
main
anandmohan1505/credit-card-fraud-detection
59
python
def main(): ' ' grid_search = new_grid_search() run_grid_search(grid_search) save_search_results(grid_search)
def main(): ' ' grid_search = new_grid_search() run_grid_search(grid_search) save_search_results(grid_search)<|docstring|>Find the best model to fit the dataset and save it into file<|endoftext|>
52dddda05376959d47fa6cdf0cd3997eda1a76ee3f45ca1c00f4ce45e184a47e
def split_dataset():
    """Read and split dataset into train and test subsets.

    Returns
    -------
    tuple
        (X_train, X_test, y_train, y_test) as produced by
        sklearn's train_test_split with a fixed random seed (42).
    """
    df = pd.read_csv(DATASET_FILENAME, header=0)
    # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
    # pandas 1.0; to_numpy() is the supported, equivalent replacement.
    X = df[df.columns[:(- 1)]].to_numpy()
    y = df[df.columns[(- 1)]].to_numpy()
    return train_test_split(X, y, test_size=0.2, random_state=42)
Read and split dataset into train and test subsets
src/train.py
split_dataset
anandmohan1505/credit-card-fraud-detection
59
python
def split_dataset(): ' ' df = pd.read_csv(DATASET_FILENAME, header=0) X = df[df.columns[:(- 1)]].as_matrix() y = df[df.columns[(- 1)]].as_matrix() return train_test_split(X, y, test_size=0.2, random_state=42)
def split_dataset(): ' ' df = pd.read_csv(DATASET_FILENAME, header=0) X = df[df.columns[:(- 1)]].as_matrix() y = df[df.columns[(- 1)]].as_matrix() return train_test_split(X, y, test_size=0.2, random_state=42)<|docstring|>Read and split dataset into train and test subsets<|endoftext|>
810125ddb269a36cdbff64490b9016d18132c982bba3f9ce6c9b06eb3d599237
def new_grid_search():
    """Create new GridSearch obj with models pipeline"""
    # Single-step pipeline: a class-balanced logistic-regression classifier.
    model_pipeline = Pipeline([(u'clf', LogisticRegression(class_weight='balanced'))])
    # Sweep the inverse regularisation strength over several orders of magnitude.
    param_grid = {'clf__C': (0.0001, 0.01, 1.0, 100.0, 10000.0)}
    return GridSearchCV(
        estimator=model_pipeline,
        param_grid=param_grid,
        scoring='recall_macro',
        cv=10,
        n_jobs=(- 1),
        verbose=3,
    )
Create new GridSearch obj with models pipeline
src/train.py
new_grid_search
anandmohan1505/credit-card-fraud-detection
59
python
def new_grid_search(): ' ' pipeline = Pipeline([(u'clf', LogisticRegression(class_weight='balanced'))]) search_params = {'clf__C': (0.0001, 0.01, 1.0, 100.0, 10000.0)} return GridSearchCV(estimator=pipeline, param_grid=search_params, scoring='recall_macro', cv=10, n_jobs=(- 1), verbose=3)
def new_grid_search(): ' ' pipeline = Pipeline([(u'clf', LogisticRegression(class_weight='balanced'))]) search_params = {'clf__C': (0.0001, 0.01, 1.0, 100.0, 10000.0)} return GridSearchCV(estimator=pipeline, param_grid=search_params, scoring='recall_macro', cv=10, n_jobs=(- 1), verbose=3)<|docstring|>Create new GridSearch obj with models pipeline<|endoftext|>
1313c608be7966b62339e81e13e35d307b5bd1b9c1c278fdfbb915af45dccb1d
def run_grid_search(grid_search, show_evaluation=True):
    """Run the GridSearch algorithm and compute evaluation metrics"""
    X_train, X_test, y_train, y_test = split_dataset()
    grid_search.fit(X_train, y_train)
    predictions = grid_search.predict(X_test)
    if not show_evaluation:
        return
    # Report held-out performance via the module logger.
    logger.debug('macro_recall: %s', recall_score(y_test, predictions, average='macro'))
    logger.debug(precision_recall_fscore_support(y_test, predictions))
    logger.debug(confusion_matrix(y_test, predictions))
Run the GridSearch algorithm and compute evaluation metrics
src/train.py
run_grid_search
anandmohan1505/credit-card-fraud-detection
59
python
def run_grid_search(grid_search, show_evaluation=True): ' ' (X_train, X_test, y_train, y_test) = split_dataset() grid_search.fit(X_train, y_train) predictions = grid_search.predict(X_test) if show_evaluation: logger.debug('macro_recall: %s', recall_score(y_test, predictions, average='macro')) logger.debug(precision_recall_fscore_support(y_test, predictions)) logger.debug(confusion_matrix(y_test, predictions))
def run_grid_search(grid_search, show_evaluation=True): ' ' (X_train, X_test, y_train, y_test) = split_dataset() grid_search.fit(X_train, y_train) predictions = grid_search.predict(X_test) if show_evaluation: logger.debug('macro_recall: %s', recall_score(y_test, predictions, average='macro')) logger.debug(precision_recall_fscore_support(y_test, predictions)) logger.debug(confusion_matrix(y_test, predictions))<|docstring|>Run the GridSearch algorithm and compute evaluation metrics<|endoftext|>
9a31fe271a41439a5cba191581b1fa08cd7de9227b08f7a49205dd1be296b82c
def save_search_results(grid_search):
    """Serialize model into file"""
    # Persist only the best estimator (refit on the full training set), not
    # the whole grid-search object, so loading the model later is cheap.
    joblib.dump(grid_search.best_estimator_, MODEL_FILENAME)
Serialize model into file
src/train.py
save_search_results
anandmohan1505/credit-card-fraud-detection
59
python
def save_search_results(grid_search): ' ' joblib.dump(grid_search.best_estimator_, MODEL_FILENAME)
def save_search_results(grid_search): ' ' joblib.dump(grid_search.best_estimator_, MODEL_FILENAME)<|docstring|>Serialize model into file<|endoftext|>
f335df0d57e05feee5395172cd328ab342a69072c5a21b9c2464519ba7b8fa0f
def path_exists(file_path):
    """Check if a file exists

    Unlike os.path.exists, this throws an exception if there is an error
    checking if the file exists (for example, if there is a perms error on
    the parent dir).

    Returns:
        bool: True if the file exists; False if not.
    """
    try:
        os.stat(file_path)
    except OSError as err:
        if err.errno == errno.ENOENT:
            # A plainly missing path is reported cleanly, not raised.
            return False
        # Any other stat() failure (e.g. permission error) is propagated.
        raise
    return True
Check if a file exists Unlike os.path.exists, this throws an exception if there is an error checking if the file exists (for example, if there is a perms error on the parent dir). Returns: bool: True if the file exists; False if not.
synapse/config/_base.py
path_exists
jkanefendt/synapse
7
python
def path_exists(file_path): 'Check if a file exists\n\n Unlike os.path.exists, this throws an exception if there is an error\n checking if the file exists (for example, if there is a perms error on\n the parent dir).\n\n Returns:\n bool: True if the file exists; False if not.\n ' try: os.stat(file_path) return True except OSError as e: if (e.errno != errno.ENOENT): raise e return False
def path_exists(file_path): 'Check if a file exists\n\n Unlike os.path.exists, this throws an exception if there is an error\n checking if the file exists (for example, if there is a perms error on\n the parent dir).\n\n Returns:\n bool: True if the file exists; False if not.\n ' try: os.stat(file_path) return True except OSError as e: if (e.errno != errno.ENOENT): raise e return False<|docstring|>Check if a file exists Unlike os.path.exists, this throws an exception if there is an error checking if the file exists (for example, if there is a perms error on the parent dir). Returns: bool: True if the file exists; False if not.<|endoftext|>
1c82af3d24f0ec7a8b4b9f0b4365509710386c9e0d8feb57a07638da617e9c49
def read_config_files(config_files):
    """Read the config files into a dict

    Args:
        config_files (iterable[str]): A list of the config files to read

    Returns: dict
    """
    merged_config = {}
    for config_file in config_files:
        with open(config_file) as file_stream:
            yaml_config = yaml.safe_load(file_stream)

        # Skip files that are empty or that parse to a non-mapping value.
        if not isinstance(yaml_config, dict):
            err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
            print(err % (config_file,))
            continue

        merged_config.update(yaml_config)

    # These two options are mandatory; fail loudly if no file provided them.
    if 'server_name' not in merged_config:
        raise ConfigError(MISSING_SERVER_NAME)

    if 'report_stats' not in merged_config:
        raise ConfigError(
            MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + '\n' + MISSING_REPORT_STATS_SPIEL
        )

    return merged_config
Read the config files into a dict Args: config_files (iterable[str]): A list of the config files to read Returns: dict
synapse/config/_base.py
read_config_files
jkanefendt/synapse
7
python
def read_config_files(config_files): 'Read the config files into a dict\n\n Args:\n config_files (iterable[str]): A list of the config files to read\n\n Returns: dict\n ' specified_config = {} for config_file in config_files: with open(config_file) as file_stream: yaml_config = yaml.safe_load(file_stream) if (not isinstance(yaml_config, dict)): err = "File %r is empty or doesn't parse into a key-value map. IGNORING." print((err % (config_file,))) continue specified_config.update(yaml_config) if ('server_name' not in specified_config): raise ConfigError(MISSING_SERVER_NAME) if ('report_stats' not in specified_config): raise ConfigError(((MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + '\n') + MISSING_REPORT_STATS_SPIEL)) return specified_config
def read_config_files(config_files): 'Read the config files into a dict\n\n Args:\n config_files (iterable[str]): A list of the config files to read\n\n Returns: dict\n ' specified_config = {} for config_file in config_files: with open(config_file) as file_stream: yaml_config = yaml.safe_load(file_stream) if (not isinstance(yaml_config, dict)): err = "File %r is empty or doesn't parse into a key-value map. IGNORING." print((err % (config_file,))) continue specified_config.update(yaml_config) if ('server_name' not in specified_config): raise ConfigError(MISSING_SERVER_NAME) if ('report_stats' not in specified_config): raise ConfigError(((MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + '\n') + MISSING_REPORT_STATS_SPIEL)) return specified_config<|docstring|>Read the config files into a dict Args: config_files (iterable[str]): A list of the config files to read Returns: dict<|endoftext|>
16700e488f88402720806357c4b6c87b59c9f9b9c9b9f6fec50f47edac856285
def find_config_files(search_paths):
    """Finds config files using a list of search paths. If a path is a file
    then that file path is added to the list. If a search path is a directory
    then all the "*.yaml" files in that directory are added to the list in
    sorted order.

    Args:
        search_paths(list(str)): A list of paths to search.

    Returns:
        list(str): A list of file paths.
    """
    config_files = []
    for config_path in (search_paths or []):
        if not os.path.isdir(config_path):
            # A plain file path is taken as-is.
            config_files.append(config_path)
            continue

        yaml_files = []
        for entry in os.listdir(config_path):
            entry_path = os.path.join(config_path, entry)
            if not os.path.isfile(entry_path):
                err = 'Found subdirectory in config directory: %r. IGNORING.'
                print(err % (entry_path,))
            elif not entry.endswith('.yaml'):
                err = "Found file in config directory that does not end in '.yaml': %r. IGNORING."
                print(err % (entry_path,))
            else:
                yaml_files.append(entry_path)
        # Deterministic ordering so later files predictably override earlier ones.
        config_files.extend(sorted(yaml_files))
    return config_files
Finds config files using a list of search paths. If a path is a file then that file path is added to the list. If a search path is a directory then all the "*.yaml" files in that directory are added to the list in sorted order. Args: search_paths(list(str)): A list of paths to search. Returns: list(str): A list of file paths.
synapse/config/_base.py
find_config_files
jkanefendt/synapse
7
python
def find_config_files(search_paths): 'Finds config files using a list of search paths. If a path is a file\n then that file path is added to the list. If a search path is a directory\n then all the "*.yaml" files in that directory are added to the list in\n sorted order.\n\n Args:\n search_paths(list(str)): A list of paths to search.\n\n Returns:\n list(str): A list of file paths.\n ' config_files = [] if search_paths: for config_path in search_paths: if os.path.isdir(config_path): files = [] for entry in os.listdir(config_path): entry_path = os.path.join(config_path, entry) if (not os.path.isfile(entry_path)): err = 'Found subdirectory in config directory: %r. IGNORING.' print((err % (entry_path,))) continue if (not entry.endswith('.yaml')): err = "Found file in config directory that does not end in '.yaml': %r. IGNORING." print((err % (entry_path,))) continue files.append(entry_path) config_files.extend(sorted(files)) else: config_files.append(config_path) return config_files
def find_config_files(search_paths): 'Finds config files using a list of search paths. If a path is a file\n then that file path is added to the list. If a search path is a directory\n then all the "*.yaml" files in that directory are added to the list in\n sorted order.\n\n Args:\n search_paths(list(str)): A list of paths to search.\n\n Returns:\n list(str): A list of file paths.\n ' config_files = [] if search_paths: for config_path in search_paths: if os.path.isdir(config_path): files = [] for entry in os.listdir(config_path): entry_path = os.path.join(config_path, entry) if (not os.path.isfile(entry_path)): err = 'Found subdirectory in config directory: %r. IGNORING.' print((err % (entry_path,))) continue if (not entry.endswith('.yaml')): err = "Found file in config directory that does not end in '.yaml': %r. IGNORING." print((err % (entry_path,))) continue files.append(entry_path) config_files.extend(sorted(files)) else: config_files.append(config_path) return config_files<|docstring|>Finds config files using a list of search paths. If a path is a file then that file path is added to the list. If a search path is a directory then all the "*.yaml" files in that directory are added to the list in sorted order. Args: search_paths(list(str)): A list of paths to search. Returns: list(str): A list of file paths.<|endoftext|>
7d0857d448313f57d1a445214676df66904e36801bc09e2f4d2011b193864d21
def read_file(file_path: Any, config_path: Iterable[str]) -> str:
    """Check the given file exists, and read it into a string

    If it does not, emit an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
            error can be emitted if it does not exist.
    Returns:
        content of the file.
    Raises:
        ConfigError if there is a problem reading the file.
    """
    if not isinstance(file_path, str):
        raise ConfigError('%r is not a string', config_path)
    try:
        # stat() first so a missing/unreadable path fails with a clear OSError.
        os.stat(file_path)
        with open(file_path) as stream:
            return stream.read()
    except OSError as exc:
        raise ConfigError('Error accessing file %r' % (file_path,), config_path) from exc
Check the given file exists, and read it into a string If it does not, emit an error indicating the problem Args: file_path: the file to be read config_path: where in the configuration file_path came from, so that a useful error can be emitted if it does not exist. Returns: content of the file. Raises: ConfigError if there is a problem reading the file.
synapse/config/_base.py
read_file
jkanefendt/synapse
7
python
def read_file(file_path: Any, config_path: Iterable[str]) -> str: 'Check the given file exists, and read it into a string\n\n If it does not, emit an error indicating the problem\n\n Args:\n file_path: the file to be read\n config_path: where in the configuration file_path came from, so that a useful\n error can be emitted if it does not exist.\n Returns:\n content of the file.\n Raises:\n ConfigError if there is a problem reading the file.\n ' if (not isinstance(file_path, str)): raise ConfigError('%r is not a string', config_path) try: os.stat(file_path) with open(file_path) as file_stream: return file_stream.read() except OSError as e: raise ConfigError(('Error accessing file %r' % (file_path,)), config_path) from e
def read_file(file_path: Any, config_path: Iterable[str]) -> str: 'Check the given file exists, and read it into a string\n\n If it does not, emit an error indicating the problem\n\n Args:\n file_path: the file to be read\n config_path: where in the configuration file_path came from, so that a useful\n error can be emitted if it does not exist.\n Returns:\n content of the file.\n Raises:\n ConfigError if there is a problem reading the file.\n ' if (not isinstance(file_path, str)): raise ConfigError('%r is not a string', config_path) try: os.stat(file_path) with open(file_path) as file_stream: return file_stream.read() except OSError as e: raise ConfigError(('Error accessing file %r' % (file_path,)), config_path) from e<|docstring|>Check the given file exists, and read it into a string If it does not, emit an error indicating the problem Args: file_path: the file to be read config_path: where in the configuration file_path came from, so that a useful error can be emitted if it does not exist. Returns: content of the file. Raises: ConfigError if there is a problem reading the file.<|endoftext|>
39195b5986c9c3d71f6037b3e55e96b1cc39cb64584ea607a38f6688b457495d
def __getattr__(self, item: str) -> Any:
    """
    Try and fetch a configuration option that does not exist on this class.

    This is so that existing configs that rely on `self.value`, where value
    is actually from a different config section, continue to work.
    """
    # Never forward these two names (they are probed by the config machinery),
    # and give up entirely when there is no root config to delegate to.
    if item in ('generate_config_section', 'read_config') or self.root is None:
        raise AttributeError(item)
    return self.root._get_unclassed_config(self.section, item)
Try and fetch a configuration option that does not exist on this class. This is so that existing configs that rely on `self.value`, where value is actually from a different config section, continue to work.
synapse/config/_base.py
__getattr__
jkanefendt/synapse
7
python
def __getattr__(self, item: str) -> Any: '\n Try and fetch a configuration option that does not exist on this class.\n\n This is so that existing configs that rely on `self.value`, where value\n is actually from a different config section, continue to work.\n ' if (item in ['generate_config_section', 'read_config']): raise AttributeError(item) if (self.root is None): raise AttributeError(item) else: return self.root._get_unclassed_config(self.section, item)
def __getattr__(self, item: str) -> Any: '\n Try and fetch a configuration option that does not exist on this class.\n\n This is so that existing configs that rely on `self.value`, where value\n is actually from a different config section, continue to work.\n ' if (item in ['generate_config_section', 'read_config']): raise AttributeError(item) if (self.root is None): raise AttributeError(item) else: return self.root._get_unclassed_config(self.section, item)<|docstring|>Try and fetch a configuration option that does not exist on this class. This is so that existing configs that rely on `self.value`, where value is actually from a different config section, continue to work.<|endoftext|>
954e8b38320ff404baab8b2c100b106cbe4ed9cdb8b845e6b552dfa6d145c63f
@staticmethod def parse_duration(value: Union[(str, int)]) -> int: "Convert a duration as a string or integer to a number of milliseconds.\n\n If an integer is provided it is treated as milliseconds and is unchanged.\n\n String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'.\n No suffix is treated as milliseconds.\n\n Args:\n value: The duration to parse.\n\n Returns:\n The number of milliseconds in the duration.\n " if isinstance(value, int): return value second = 1000 minute = (60 * second) hour = (60 * minute) day = (24 * hour) week = (7 * day) year = (365 * day) sizes = {'s': second, 'm': minute, 'h': hour, 'd': day, 'w': week, 'y': year} size = 1 suffix = value[(- 1)] if (suffix in sizes): value = value[:(- 1)] size = sizes[suffix] return (int(value) * size)
Convert a duration as a string or integer to a number of milliseconds. If an integer is provided it is treated as milliseconds and is unchanged. String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'. No suffix is treated as milliseconds. Args: value: The duration to parse. Returns: The number of milliseconds in the duration.
synapse/config/_base.py
parse_duration
jkanefendt/synapse
7
python
@staticmethod def parse_duration(value: Union[(str, int)]) -> int: "Convert a duration as a string or integer to a number of milliseconds.\n\n If an integer is provided it is treated as milliseconds and is unchanged.\n\n String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'.\n No suffix is treated as milliseconds.\n\n Args:\n value: The duration to parse.\n\n Returns:\n The number of milliseconds in the duration.\n " if isinstance(value, int): return value second = 1000 minute = (60 * second) hour = (60 * minute) day = (24 * hour) week = (7 * day) year = (365 * day) sizes = {'s': second, 'm': minute, 'h': hour, 'd': day, 'w': week, 'y': year} size = 1 suffix = value[(- 1)] if (suffix in sizes): value = value[:(- 1)] size = sizes[suffix] return (int(value) * size)
@staticmethod def parse_duration(value: Union[(str, int)]) -> int: "Convert a duration as a string or integer to a number of milliseconds.\n\n If an integer is provided it is treated as milliseconds and is unchanged.\n\n String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'.\n No suffix is treated as milliseconds.\n\n Args:\n value: The duration to parse.\n\n Returns:\n The number of milliseconds in the duration.\n " if isinstance(value, int): return value second = 1000 minute = (60 * second) hour = (60 * minute) day = (24 * hour) week = (7 * day) year = (365 * day) sizes = {'s': second, 'm': minute, 'h': hour, 'd': day, 'w': week, 'y': year} size = 1 suffix = value[(- 1)] if (suffix in sizes): value = value[:(- 1)] size = sizes[suffix] return (int(value) * size)<|docstring|>Convert a duration as a string or integer to a number of milliseconds. If an integer is provided it is treated as milliseconds and is unchanged. String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'. No suffix is treated as milliseconds. Args: value: The duration to parse. Returns: The number of milliseconds in the duration.<|endoftext|>
6d5a7eb7a93cac4420ede70c9e74d714e990bf83f016f6444446ad8a055172c2
@classmethod
def read_file(cls, file_path, config_name):
    """Deprecated: call read_file directly"""
    # Retained for backwards compatibility; delegates to the module-level
    # read_file() helper, wrapping config_name into the config-path tuple
    # that the helper expects.
    return read_file(file_path, (config_name,))
Deprecated: call read_file directly
synapse/config/_base.py
read_file
jkanefendt/synapse
7
python
@classmethod def read_file(cls, file_path, config_name): return read_file(file_path, (config_name,))
@classmethod def read_file(cls, file_path, config_name): return read_file(file_path, (config_name,))<|docstring|>Deprecated: call read_file directly<|endoftext|>
a7045214aa723a653403dc0a881f12c9da81fce258193bae4554061bae22af8c
def read_template(self, filename: str) -> jinja2.Template:
    """Load a template file from disk.

    This function will attempt to load the given template from the default Synapse
    template directory.

    Files read are treated as Jinja templates. The templates is not rendered yet
    and has autoescape enabled.

    Args:
        filename: A template filename to read.

    Raises:
        ConfigError: if the file's path is incorrect or otherwise cannot be read.

    Returns:
        A jinja2 template.
    """
    # Delegate to the batch loader and unwrap its single result.
    templates = self.read_templates([filename])
    return templates[0]
Load a template file from disk. This function will attempt to load the given template from the default Synapse template directory. Files read are treated as Jinja templates. The templates is not rendered yet and has autoescape enabled. Args: filename: A template filename to read. Raises: ConfigError: if the file's path is incorrect or otherwise cannot be read. Returns: A jinja2 template.
synapse/config/_base.py
read_template
jkanefendt/synapse
7
python
def read_template(self, filename: str) -> jinja2.Template: "Load a template file from disk.\n\n This function will attempt to load the given template from the default Synapse\n template directory.\n\n Files read are treated as Jinja templates. The templates is not rendered yet\n and has autoescape enabled.\n\n Args:\n filename: A template filename to read.\n\n Raises:\n ConfigError: if the file's path is incorrect or otherwise cannot be read.\n\n Returns:\n A jinja2 template.\n " return self.read_templates([filename])[0]
def read_template(self, filename: str) -> jinja2.Template: "Load a template file from disk.\n\n This function will attempt to load the given template from the default Synapse\n template directory.\n\n Files read are treated as Jinja templates. The templates is not rendered yet\n and has autoescape enabled.\n\n Args:\n filename: A template filename to read.\n\n Raises:\n ConfigError: if the file's path is incorrect or otherwise cannot be read.\n\n Returns:\n A jinja2 template.\n " return self.read_templates([filename])[0]<|docstring|>Load a template file from disk. This function will attempt to load the given template from the default Synapse template directory. Files read are treated as Jinja templates. The templates is not rendered yet and has autoescape enabled. Args: filename: A template filename to read. Raises: ConfigError: if the file's path is incorrect or otherwise cannot be read. Returns: A jinja2 template.<|endoftext|>
db6fe0781cef0e2b1f43f0a53550c0b51c16956d093f09c726d21080cb311699
def read_templates(self, filenames: List[str], custom_template_directories: Optional[Iterable[str]]=None) -> List[jinja2.Template]:
    """Load a list of template files from disk using the given variables.

    This function will attempt to load the given templates from the default Synapse
    template directory. If `custom_template_directories` is supplied, any directory
    in this list is tried (in the order they appear in the list) before trying
    Synapse's default directory.

    Files read are treated as Jinja templates. The templates are not rendered yet
    and have autoescape enabled.

    Args:
        filenames: A list of template filenames to read.

        custom_template_directories: A list of directory to try to look for the
            templates before using the default Synapse template directory instead.

    Raises:
        ConfigError: if the file's path is incorrect or otherwise cannot be read.

    Returns:
        A list of jinja2 templates.
    """
    search_directories = []
    if (custom_template_directories is not None):
        for custom_template_directory in custom_template_directories:
            # Fail early on misconfiguration rather than at render time.
            if (not self.path_exists(custom_template_directory)):
                raise ConfigError(('Configured template directory does not exist: %s' % (custom_template_directory,)))
            search_directories.append(custom_template_directory)
    # The default directory goes last so custom directories take precedence.
    search_directories.append(self.default_template_dir)
    loader = jinja2.FileSystemLoader(search_directories)
    env = jinja2.Environment(loader=loader, autoescape=jinja2.select_autoescape())
    # Extra filters made available to every template.
    env.filters.update({'format_ts': _format_ts_filter, 'mxc_to_http': _create_mxc_to_http_filter(self.public_baseurl)})
    return [env.get_template(filename) for filename in filenames]
Load a list of template files from disk using the given variables. This function will attempt to load the given templates from the default Synapse template directory. If `custom_template_directories` is supplied, any directory in this list is tried (in the order they appear in the list) before trying Synapse's default directory. Files read are treated as Jinja templates. The templates are not rendered yet and have autoescape enabled. Args: filenames: A list of template filenames to read. custom_template_directories: A list of directory to try to look for the templates before using the default Synapse template directory instead. Raises: ConfigError: if the file's path is incorrect or otherwise cannot be read. Returns: A list of jinja2 templates.
synapse/config/_base.py
read_templates
jkanefendt/synapse
7
python
def read_templates(self, filenames: List[str], custom_template_directories: Optional[Iterable[str]]=None) -> List[jinja2.Template]: "Load a list of template files from disk using the given variables.\n\n This function will attempt to load the given templates from the default Synapse\n template directory. If `custom_template_directories` is supplied, any directory\n in this list is tried (in the order they appear in the list) before trying\n Synapse's default directory.\n\n Files read are treated as Jinja templates. The templates are not rendered yet\n and have autoescape enabled.\n\n Args:\n filenames: A list of template filenames to read.\n\n custom_template_directories: A list of directory to try to look for the\n templates before using the default Synapse template directory instead.\n\n Raises:\n ConfigError: if the file's path is incorrect or otherwise cannot be read.\n\n Returns:\n A list of jinja2 templates.\n " search_directories = [] if (custom_template_directories is not None): for custom_template_directory in custom_template_directories: if (not self.path_exists(custom_template_directory)): raise ConfigError(('Configured template directory does not exist: %s' % (custom_template_directory,))) search_directories.append(custom_template_directory) search_directories.append(self.default_template_dir) loader = jinja2.FileSystemLoader(search_directories) env = jinja2.Environment(loader=loader, autoescape=jinja2.select_autoescape()) env.filters.update({'format_ts': _format_ts_filter, 'mxc_to_http': _create_mxc_to_http_filter(self.public_baseurl)}) return [env.get_template(filename) for filename in filenames]
def read_templates(self, filenames: List[str], custom_template_directories: Optional[Iterable[str]]=None) -> List[jinja2.Template]: "Load a list of template files from disk using the given variables.\n\n This function will attempt to load the given templates from the default Synapse\n template directory. If `custom_template_directories` is supplied, any directory\n in this list is tried (in the order they appear in the list) before trying\n Synapse's default directory.\n\n Files read are treated as Jinja templates. The templates are not rendered yet\n and have autoescape enabled.\n\n Args:\n filenames: A list of template filenames to read.\n\n custom_template_directories: A list of directory to try to look for the\n templates before using the default Synapse template directory instead.\n\n Raises:\n ConfigError: if the file's path is incorrect or otherwise cannot be read.\n\n Returns:\n A list of jinja2 templates.\n " search_directories = [] if (custom_template_directories is not None): for custom_template_directory in custom_template_directories: if (not self.path_exists(custom_template_directory)): raise ConfigError(('Configured template directory does not exist: %s' % (custom_template_directory,))) search_directories.append(custom_template_directory) search_directories.append(self.default_template_dir) loader = jinja2.FileSystemLoader(search_directories) env = jinja2.Environment(loader=loader, autoescape=jinja2.select_autoescape()) env.filters.update({'format_ts': _format_ts_filter, 'mxc_to_http': _create_mxc_to_http_filter(self.public_baseurl)}) return [env.get_template(filename) for filename in filenames]<|docstring|>Load a list of template files from disk using the given variables. This function will attempt to load the given templates from the default Synapse template directory. If `custom_template_directories` is supplied, any directory in this list is tried (in the order they appear in the list) before trying Synapse's default directory. 
Files read are treated as Jinja templates. The templates are not rendered yet and have autoescape enabled. Args: filenames: A list of template filenames to read. custom_template_directories: A list of directory to try to look for the templates before using the default Synapse template directory instead. Raises: ConfigError: if the file's path is incorrect or otherwise cannot be read. Returns: A list of jinja2 templates.<|endoftext|>
179d636d0d83036fdf5e6d26c35a1e757f2ba826e7b6021a99625771cd15481a
def __getattr__(self, item: str) -> Any: '\n Redirect lookups on this object either to config objects, or values on\n config objects, so that `config.tls.blah` works, as well as legacy uses\n of things like `config.server_name`. It will first look up the config\n section name, and then values on those config classes.\n ' if (item in self._configs.keys()): return self._configs[item] return self._get_unclassed_config(None, item)
Redirect lookups on this object either to config objects, or values on config objects, so that `config.tls.blah` works, as well as legacy uses of things like `config.server_name`. It will first look up the config section name, and then values on those config classes.
synapse/config/_base.py
__getattr__
jkanefendt/synapse
7
python
def __getattr__(self, item: str) -> Any: '\n Redirect lookups on this object either to config objects, or values on\n config objects, so that `config.tls.blah` works, as well as legacy uses\n of things like `config.server_name`. It will first look up the config\n section name, and then values on those config classes.\n ' if (item in self._configs.keys()): return self._configs[item] return self._get_unclassed_config(None, item)
def __getattr__(self, item: str) -> Any: '\n Redirect lookups on this object either to config objects, or values on\n config objects, so that `config.tls.blah` works, as well as legacy uses\n of things like `config.server_name`. It will first look up the config\n section name, and then values on those config classes.\n ' if (item in self._configs.keys()): return self._configs[item] return self._get_unclassed_config(None, item)<|docstring|>Redirect lookups on this object either to config objects, or values on config objects, so that `config.tls.blah` works, as well as legacy uses of things like `config.server_name`. It will first look up the config section name, and then values on those config classes.<|endoftext|>
1bc7779352e6322661be1a8117ba91af7813c76ff7595b1281f1dc816c2be8e5
def _get_unclassed_config(self, asking_section: Optional[str], item: str): '\n Fetch a config value from one of the instantiated config classes that\n has not been fetched directly.\n\n Args:\n asking_section: If this check is coming from a Config child, which\n one? This section will not be asked if it has the value.\n item: The configuration value key.\n\n Raises:\n AttributeError if no config classes have the config key. The body\n will contain what sections were checked.\n ' for (key, val) in self._configs.items(): if (key == asking_section): continue if (item in dir(val)): return getattr(val, item) raise AttributeError(item, ('not found in %s' % (list(self._configs.keys()),)))
Fetch a config value from one of the instantiated config classes that has not been fetched directly. Args: asking_section: If this check is coming from a Config child, which one? This section will not be asked if it has the value. item: The configuration value key. Raises: AttributeError if no config classes have the config key. The body will contain what sections were checked.
synapse/config/_base.py
_get_unclassed_config
jkanefendt/synapse
7
python
def _get_unclassed_config(self, asking_section: Optional[str], item: str): '\n Fetch a config value from one of the instantiated config classes that\n has not been fetched directly.\n\n Args:\n asking_section: If this check is coming from a Config child, which\n one? This section will not be asked if it has the value.\n item: The configuration value key.\n\n Raises:\n AttributeError if no config classes have the config key. The body\n will contain what sections were checked.\n ' for (key, val) in self._configs.items(): if (key == asking_section): continue if (item in dir(val)): return getattr(val, item) raise AttributeError(item, ('not found in %s' % (list(self._configs.keys()),)))
def _get_unclassed_config(self, asking_section: Optional[str], item: str): '\n Fetch a config value from one of the instantiated config classes that\n has not been fetched directly.\n\n Args:\n asking_section: If this check is coming from a Config child, which\n one? This section will not be asked if it has the value.\n item: The configuration value key.\n\n Raises:\n AttributeError if no config classes have the config key. The body\n will contain what sections were checked.\n ' for (key, val) in self._configs.items(): if (key == asking_section): continue if (item in dir(val)): return getattr(val, item) raise AttributeError(item, ('not found in %s' % (list(self._configs.keys()),)))<|docstring|>Fetch a config value from one of the instantiated config classes that has not been fetched directly. Args: asking_section: If this check is coming from a Config child, which one? This section will not be asked if it has the value. item: The configuration value key. Raises: AttributeError if no config classes have the config key. The body will contain what sections were checked.<|endoftext|>
502c30fcd85324a04e1ebf210c81384a3854e711494925135c37487e04a2a6b1
def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[(str, Any)]: '\n Invoke a function on all instantiated config objects this RootConfig is\n configured to use.\n\n Args:\n func_name: Name of function to invoke\n *args\n **kwargs\n Returns:\n ordered dictionary of config section name and the result of the\n function from it.\n ' res = OrderedDict() for (name, config) in self._configs.items(): if hasattr(config, func_name): res[name] = getattr(config, func_name)(*args, **kwargs) return res
Invoke a function on all instantiated config objects this RootConfig is configured to use. Args: func_name: Name of function to invoke *args **kwargs Returns: ordered dictionary of config section name and the result of the function from it.
synapse/config/_base.py
invoke_all
jkanefendt/synapse
7
python
def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[(str, Any)]: '\n Invoke a function on all instantiated config objects this RootConfig is\n configured to use.\n\n Args:\n func_name: Name of function to invoke\n *args\n **kwargs\n Returns:\n ordered dictionary of config section name and the result of the\n function from it.\n ' res = OrderedDict() for (name, config) in self._configs.items(): if hasattr(config, func_name): res[name] = getattr(config, func_name)(*args, **kwargs) return res
def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[(str, Any)]: '\n Invoke a function on all instantiated config objects this RootConfig is\n configured to use.\n\n Args:\n func_name: Name of function to invoke\n *args\n **kwargs\n Returns:\n ordered dictionary of config section name and the result of the\n function from it.\n ' res = OrderedDict() for (name, config) in self._configs.items(): if hasattr(config, func_name): res[name] = getattr(config, func_name)(*args, **kwargs) return res<|docstring|>Invoke a function on all instantiated config objects this RootConfig is configured to use. Args: func_name: Name of function to invoke *args **kwargs Returns: ordered dictionary of config section name and the result of the function from it.<|endoftext|>
3bf6716ade30db296711756566275bf0e6bfa3ce50d0f57eb8a1e943d4a562f2
@classmethod def invoke_all_static(cls, func_name: str, *args, **kwargs): '\n Invoke a static function on config objects this RootConfig is\n configured to use.\n\n Args:\n func_name: Name of function to invoke\n *args\n **kwargs\n Returns:\n ordered dictionary of config section name and the result of the\n function from it.\n ' for config in cls.config_classes: if hasattr(config, func_name): getattr(config, func_name)(*args, **kwargs)
Invoke a static function on config objects this RootConfig is configured to use. Args: func_name: Name of function to invoke *args **kwargs Returns: ordered dictionary of config section name and the result of the function from it.
synapse/config/_base.py
invoke_all_static
jkanefendt/synapse
7
python
@classmethod def invoke_all_static(cls, func_name: str, *args, **kwargs): '\n Invoke a static function on config objects this RootConfig is\n configured to use.\n\n Args:\n func_name: Name of function to invoke\n *args\n **kwargs\n Returns:\n ordered dictionary of config section name and the result of the\n function from it.\n ' for config in cls.config_classes: if hasattr(config, func_name): getattr(config, func_name)(*args, **kwargs)
@classmethod def invoke_all_static(cls, func_name: str, *args, **kwargs): '\n Invoke a static function on config objects this RootConfig is\n configured to use.\n\n Args:\n func_name: Name of function to invoke\n *args\n **kwargs\n Returns:\n ordered dictionary of config section name and the result of the\n function from it.\n ' for config in cls.config_classes: if hasattr(config, func_name): getattr(config, func_name)(*args, **kwargs)<|docstring|>Invoke a static function on config objects this RootConfig is configured to use. Args: func_name: Name of function to invoke *args **kwargs Returns: ordered dictionary of config section name and the result of the function from it.<|endoftext|>
7f4f4b81955f5a99f636379be89ccdf9950cf9f2c2fd761ef943636501fa4a6d
def generate_config(self, config_dir_path, data_dir_path, server_name, generate_secrets=False, report_stats=None, open_private_ports=False, listeners=None, tls_certificate_path=None, tls_private_key_path=None): '\n Build a default configuration file\n\n This is used when the user explicitly asks us to generate a config file\n (eg with --generate_config).\n\n Args:\n config_dir_path (str): The path where the config files are kept. Used to\n create filenames for things like the log config and the signing key.\n\n data_dir_path (str): The path where the data files are kept. Used to create\n filenames for things like the database and media store.\n\n server_name (str): The server name. Used to initialise the server_name\n config param, but also used in the names of some of the config files.\n\n generate_secrets (bool): True if we should generate new secrets for things\n like the macaroon_secret_key. If False, these parameters will be left\n unset.\n\n report_stats (bool|None): Initial setting for the report_stats setting.\n If None, report_stats will be left unset.\n\n open_private_ports (bool): True to leave private ports (such as the non-TLS\n HTTP listener) open to the internet.\n\n listeners (list(dict)|None): A list of descriptions of the listeners\n synapse should start with each of which specifies a port (str), a list of\n resources (list(str)), tls (bool) and type (str). 
For example:\n [{\n "port": 8448,\n "resources": [{"names": ["federation"]}],\n "tls": True,\n "type": "http",\n },\n {\n "port": 443,\n "resources": [{"names": ["client"]}],\n "tls": False,\n "type": "http",\n }],\n\n\n database (str|None): The database type to configure, either `psycog2`\n or `sqlite3`.\n\n tls_certificate_path (str|None): The path to the tls certificate.\n\n tls_private_key_path (str|None): The path to the tls private key.\n\n Returns:\n str: the yaml config file\n ' return (CONFIG_FILE_HEADER + '\n\n'.join((dedent(conf) for conf in self.invoke_all('generate_config_section', config_dir_path=config_dir_path, data_dir_path=data_dir_path, server_name=server_name, generate_secrets=generate_secrets, report_stats=report_stats, open_private_ports=open_private_ports, listeners=listeners, tls_certificate_path=tls_certificate_path, tls_private_key_path=tls_private_key_path).values())))
Build a default configuration file This is used when the user explicitly asks us to generate a config file (eg with --generate_config). Args: config_dir_path (str): The path where the config files are kept. Used to create filenames for things like the log config and the signing key. data_dir_path (str): The path where the data files are kept. Used to create filenames for things like the database and media store. server_name (str): The server name. Used to initialise the server_name config param, but also used in the names of some of the config files. generate_secrets (bool): True if we should generate new secrets for things like the macaroon_secret_key. If False, these parameters will be left unset. report_stats (bool|None): Initial setting for the report_stats setting. If None, report_stats will be left unset. open_private_ports (bool): True to leave private ports (such as the non-TLS HTTP listener) open to the internet. listeners (list(dict)|None): A list of descriptions of the listeners synapse should start with each of which specifies a port (str), a list of resources (list(str)), tls (bool) and type (str). For example: [{ "port": 8448, "resources": [{"names": ["federation"]}], "tls": True, "type": "http", }, { "port": 443, "resources": [{"names": ["client"]}], "tls": False, "type": "http", }], database (str|None): The database type to configure, either `psycog2` or `sqlite3`. tls_certificate_path (str|None): The path to the tls certificate. tls_private_key_path (str|None): The path to the tls private key. Returns: str: the yaml config file
synapse/config/_base.py
generate_config
jkanefendt/synapse
7
python
def generate_config(self, config_dir_path, data_dir_path, server_name, generate_secrets=False, report_stats=None, open_private_ports=False, listeners=None, tls_certificate_path=None, tls_private_key_path=None): '\n Build a default configuration file\n\n This is used when the user explicitly asks us to generate a config file\n (eg with --generate_config).\n\n Args:\n config_dir_path (str): The path where the config files are kept. Used to\n create filenames for things like the log config and the signing key.\n\n data_dir_path (str): The path where the data files are kept. Used to create\n filenames for things like the database and media store.\n\n server_name (str): The server name. Used to initialise the server_name\n config param, but also used in the names of some of the config files.\n\n generate_secrets (bool): True if we should generate new secrets for things\n like the macaroon_secret_key. If False, these parameters will be left\n unset.\n\n report_stats (bool|None): Initial setting for the report_stats setting.\n If None, report_stats will be left unset.\n\n open_private_ports (bool): True to leave private ports (such as the non-TLS\n HTTP listener) open to the internet.\n\n listeners (list(dict)|None): A list of descriptions of the listeners\n synapse should start with each of which specifies a port (str), a list of\n resources (list(str)), tls (bool) and type (str). 
For example:\n [{\n "port": 8448,\n "resources": [{"names": ["federation"]}],\n "tls": True,\n "type": "http",\n },\n {\n "port": 443,\n "resources": [{"names": ["client"]}],\n "tls": False,\n "type": "http",\n }],\n\n\n database (str|None): The database type to configure, either `psycog2`\n or `sqlite3`.\n\n tls_certificate_path (str|None): The path to the tls certificate.\n\n tls_private_key_path (str|None): The path to the tls private key.\n\n Returns:\n str: the yaml config file\n ' return (CONFIG_FILE_HEADER + '\n\n'.join((dedent(conf) for conf in self.invoke_all('generate_config_section', config_dir_path=config_dir_path, data_dir_path=data_dir_path, server_name=server_name, generate_secrets=generate_secrets, report_stats=report_stats, open_private_ports=open_private_ports, listeners=listeners, tls_certificate_path=tls_certificate_path, tls_private_key_path=tls_private_key_path).values())))
def generate_config(self, config_dir_path, data_dir_path, server_name, generate_secrets=False, report_stats=None, open_private_ports=False, listeners=None, tls_certificate_path=None, tls_private_key_path=None): '\n Build a default configuration file\n\n This is used when the user explicitly asks us to generate a config file\n (eg with --generate_config).\n\n Args:\n config_dir_path (str): The path where the config files are kept. Used to\n create filenames for things like the log config and the signing key.\n\n data_dir_path (str): The path where the data files are kept. Used to create\n filenames for things like the database and media store.\n\n server_name (str): The server name. Used to initialise the server_name\n config param, but also used in the names of some of the config files.\n\n generate_secrets (bool): True if we should generate new secrets for things\n like the macaroon_secret_key. If False, these parameters will be left\n unset.\n\n report_stats (bool|None): Initial setting for the report_stats setting.\n If None, report_stats will be left unset.\n\n open_private_ports (bool): True to leave private ports (such as the non-TLS\n HTTP listener) open to the internet.\n\n listeners (list(dict)|None): A list of descriptions of the listeners\n synapse should start with each of which specifies a port (str), a list of\n resources (list(str)), tls (bool) and type (str). 
For example:\n [{\n "port": 8448,\n "resources": [{"names": ["federation"]}],\n "tls": True,\n "type": "http",\n },\n {\n "port": 443,\n "resources": [{"names": ["client"]}],\n "tls": False,\n "type": "http",\n }],\n\n\n database (str|None): The database type to configure, either `psycog2`\n or `sqlite3`.\n\n tls_certificate_path (str|None): The path to the tls certificate.\n\n tls_private_key_path (str|None): The path to the tls private key.\n\n Returns:\n str: the yaml config file\n ' return (CONFIG_FILE_HEADER + '\n\n'.join((dedent(conf) for conf in self.invoke_all('generate_config_section', config_dir_path=config_dir_path, data_dir_path=data_dir_path, server_name=server_name, generate_secrets=generate_secrets, report_stats=report_stats, open_private_ports=open_private_ports, listeners=listeners, tls_certificate_path=tls_certificate_path, tls_private_key_path=tls_private_key_path).values())))<|docstring|>Build a default configuration file This is used when the user explicitly asks us to generate a config file (eg with --generate_config). Args: config_dir_path (str): The path where the config files are kept. Used to create filenames for things like the log config and the signing key. data_dir_path (str): The path where the data files are kept. Used to create filenames for things like the database and media store. server_name (str): The server name. Used to initialise the server_name config param, but also used in the names of some of the config files. generate_secrets (bool): True if we should generate new secrets for things like the macaroon_secret_key. If False, these parameters will be left unset. report_stats (bool|None): Initial setting for the report_stats setting. If None, report_stats will be left unset. open_private_ports (bool): True to leave private ports (such as the non-TLS HTTP listener) open to the internet. 
listeners (list(dict)|None): A list of descriptions of the listeners synapse should start with each of which specifies a port (str), a list of resources (list(str)), tls (bool) and type (str). For example: [{ "port": 8448, "resources": [{"names": ["federation"]}], "tls": True, "type": "http", }, { "port": 443, "resources": [{"names": ["client"]}], "tls": False, "type": "http", }], database (str|None): The database type to configure, either `psycog2` or `sqlite3`. tls_certificate_path (str|None): The path to the tls certificate. tls_private_key_path (str|None): The path to the tls private key. Returns: str: the yaml config file<|endoftext|>
42a14bf155c072c1fe40509affd2160464b6fe6cf16fa2c3d0ead78d033d21e7
@classmethod def load_config(cls, description, argv): "Parse the commandline and config files\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Returns: Config object.\n " config_parser = argparse.ArgumentParser(description=description) cls.add_arguments_to_parser(config_parser) (obj, _) = cls.load_config_with_parser(config_parser, argv) return obj
Parse the commandline and config files Doesn't support config-file-generation: used by the worker apps. Returns: Config object.
synapse/config/_base.py
load_config
jkanefendt/synapse
7
python
@classmethod def load_config(cls, description, argv): "Parse the commandline and config files\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Returns: Config object.\n " config_parser = argparse.ArgumentParser(description=description) cls.add_arguments_to_parser(config_parser) (obj, _) = cls.load_config_with_parser(config_parser, argv) return obj
@classmethod def load_config(cls, description, argv): "Parse the commandline and config files\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Returns: Config object.\n " config_parser = argparse.ArgumentParser(description=description) cls.add_arguments_to_parser(config_parser) (obj, _) = cls.load_config_with_parser(config_parser, argv) return obj<|docstring|>Parse the commandline and config files Doesn't support config-file-generation: used by the worker apps. Returns: Config object.<|endoftext|>
0c19b28b491407973de60c7533df49d34ceda4d87bf1b7ed1d6fbd3c4956192b
@classmethod def add_arguments_to_parser(cls, config_parser): "Adds all the config flags to an ArgumentParser.\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Used for workers where we want to add extra flags/subcommands.\n\n Args:\n config_parser (ArgumentParser): App description\n " config_parser.add_argument('-c', '--config-path', action='append', metavar='CONFIG_FILE', help='Specify config file. Can be given multiple times and may specify directories containing *.yaml files.') config_parser.add_argument('--keys-directory', metavar='DIRECTORY', help='Where files such as certs and signing keys are stored when their location is not given explicitly in the config. Defaults to the directory containing the last config file') cls.invoke_all_static('add_arguments', config_parser)
Adds all the config flags to an ArgumentParser. Doesn't support config-file-generation: used by the worker apps. Used for workers where we want to add extra flags/subcommands. Args: config_parser (ArgumentParser): App description
synapse/config/_base.py
add_arguments_to_parser
jkanefendt/synapse
7
python
@classmethod def add_arguments_to_parser(cls, config_parser): "Adds all the config flags to an ArgumentParser.\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Used for workers where we want to add extra flags/subcommands.\n\n Args:\n config_parser (ArgumentParser): App description\n " config_parser.add_argument('-c', '--config-path', action='append', metavar='CONFIG_FILE', help='Specify config file. Can be given multiple times and may specify directories containing *.yaml files.') config_parser.add_argument('--keys-directory', metavar='DIRECTORY', help='Where files such as certs and signing keys are stored when their location is not given explicitly in the config. Defaults to the directory containing the last config file') cls.invoke_all_static('add_arguments', config_parser)
@classmethod def add_arguments_to_parser(cls, config_parser): "Adds all the config flags to an ArgumentParser.\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Used for workers where we want to add extra flags/subcommands.\n\n Args:\n config_parser (ArgumentParser): App description\n " config_parser.add_argument('-c', '--config-path', action='append', metavar='CONFIG_FILE', help='Specify config file. Can be given multiple times and may specify directories containing *.yaml files.') config_parser.add_argument('--keys-directory', metavar='DIRECTORY', help='Where files such as certs and signing keys are stored when their location is not given explicitly in the config. Defaults to the directory containing the last config file') cls.invoke_all_static('add_arguments', config_parser)<|docstring|>Adds all the config flags to an ArgumentParser. Doesn't support config-file-generation: used by the worker apps. Used for workers where we want to add extra flags/subcommands. Args: config_parser (ArgumentParser): App description<|endoftext|>
41c3634d5655430d0f420c85d7eb5195c1f05447741263691a0b343e5d3596a3
@classmethod def load_config_with_parser(cls, parser, argv): "Parse the commandline and config files with the given parser\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Used for workers where we want to add extra flags/subcommands.\n\n Args:\n parser (ArgumentParser)\n argv (list[str])\n\n Returns:\n tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed\n config object and the parsed argparse.Namespace object from\n `parser.parse_args(..)`\n " obj = cls() config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if (not config_files): parser.error('Must supply a config file.') if config_args.keys_directory: config_dir_path = config_args.keys_directory else: config_dir_path = os.path.dirname(config_files[(- 1)]) config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() config_dict = read_config_files(config_files) obj.parse_config_dict(config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path) obj.invoke_all('read_arguments', config_args) return (obj, config_args)
Parse the commandline and config files with the given parser Doesn't support config-file-generation: used by the worker apps. Used for workers where we want to add extra flags/subcommands. Args: parser (ArgumentParser) argv (list[str]) Returns: tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed config object and the parsed argparse.Namespace object from `parser.parse_args(..)`
synapse/config/_base.py
load_config_with_parser
jkanefendt/synapse
7
python
@classmethod def load_config_with_parser(cls, parser, argv): "Parse the commandline and config files with the given parser\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Used for workers where we want to add extra flags/subcommands.\n\n Args:\n parser (ArgumentParser)\n argv (list[str])\n\n Returns:\n tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed\n config object and the parsed argparse.Namespace object from\n `parser.parse_args(..)`\n " obj = cls() config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if (not config_files): parser.error('Must supply a config file.') if config_args.keys_directory: config_dir_path = config_args.keys_directory else: config_dir_path = os.path.dirname(config_files[(- 1)]) config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() config_dict = read_config_files(config_files) obj.parse_config_dict(config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path) obj.invoke_all('read_arguments', config_args) return (obj, config_args)
@classmethod def load_config_with_parser(cls, parser, argv): "Parse the commandline and config files with the given parser\n\n Doesn't support config-file-generation: used by the worker apps.\n\n Used for workers where we want to add extra flags/subcommands.\n\n Args:\n parser (ArgumentParser)\n argv (list[str])\n\n Returns:\n tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed\n config object and the parsed argparse.Namespace object from\n `parser.parse_args(..)`\n " obj = cls() config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if (not config_files): parser.error('Must supply a config file.') if config_args.keys_directory: config_dir_path = config_args.keys_directory else: config_dir_path = os.path.dirname(config_files[(- 1)]) config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() config_dict = read_config_files(config_files) obj.parse_config_dict(config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path) obj.invoke_all('read_arguments', config_args) return (obj, config_args)<|docstring|>Parse the commandline and config files with the given parser Doesn't support config-file-generation: used by the worker apps. Used for workers where we want to add extra flags/subcommands. Args: parser (ArgumentParser) argv (list[str]) Returns: tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed config object and the parsed argparse.Namespace object from `parser.parse_args(..)`<|endoftext|>
063af347096f9dd39260fbda4dab0d55622fe4b5d46abb24dd0a1f8a1973a597
@classmethod def load_or_generate_config(cls, description, argv): 'Parse the commandline and config files\n\n Supports generation of config files, so is used for the main homeserver app.\n\n Returns: Config object, or None if --generate-config or --generate-keys was set\n ' parser = argparse.ArgumentParser(description=description) parser.add_argument('-c', '--config-path', action='append', metavar='CONFIG_FILE', help='Specify config file. Can be given multiple times and may specify directories containing *.yaml files.') generate_group = parser.add_argument_group('Config generation') generate_group.add_argument('--generate-config', action='store_true', help='Generate a config file, then exit.') generate_group.add_argument('--generate-missing-configs', '--generate-keys', action='store_true', help='Generate any missing additional config files, then exit.') generate_group.add_argument('-H', '--server-name', help='The server name to generate a config file for.') generate_group.add_argument('--report-stats', action='store', help='Whether the generated config reports anonymized usage statistics.', choices=['yes', 'no']) generate_group.add_argument('--config-directory', '--keys-directory', metavar='DIRECTORY', help='Specify where additional config files such as signing keys and log config should be stored. Defaults to the same directory as the last config file.') generate_group.add_argument('--data-directory', metavar='DIRECTORY', help='Specify where data such as the media store and database file should be stored. Defaults to the current working directory.') generate_group.add_argument('--open-private-ports', action='store_true', help='Leave private ports (such as the non-TLS HTTP listener) open to the internet. 
Do not use this unless you know what you are doing.') cls.invoke_all_static('add_arguments', parser) config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if (not config_files): parser.error('Must supply a config file.\nA config file can be automatically generated using "--generate-config -H SERVER_NAME -c CONFIG-FILE"') if config_args.config_directory: config_dir_path = config_args.config_directory else: config_dir_path = os.path.dirname(config_files[(- 1)]) config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() generate_missing_configs = config_args.generate_missing_configs obj = cls() if config_args.generate_config: if (config_args.report_stats is None): parser.error(('Please specify either --report-stats=yes or --report-stats=no\n\n' + MISSING_REPORT_STATS_SPIEL)) (config_path,) = config_files if (not path_exists(config_path)): print(('Generating config file %s' % (config_path,))) if config_args.data_directory: data_dir_path = config_args.data_directory else: data_dir_path = os.getcwd() data_dir_path = os.path.abspath(data_dir_path) server_name = config_args.server_name if (not server_name): raise ConfigError('Must specify a server_name to a generate config for. Pass -H server.name.') config_str = obj.generate_config(config_dir_path=config_dir_path, data_dir_path=data_dir_path, server_name=server_name, report_stats=(config_args.report_stats == 'yes'), generate_secrets=True, open_private_ports=config_args.open_private_ports) if (not path_exists(config_dir_path)): os.makedirs(config_dir_path) with open(config_path, 'w') as config_file: config_file.write(config_str) config_file.write('\n\n# vim:ft=yaml') config_dict = yaml.safe_load(config_str) obj.generate_missing_files(config_dict, config_dir_path) print(('A config file has been generated in %r for server name %r. Please review this file and customise it to your needs.' 
% (config_path, server_name))) return else: print(('Config file %r already exists. Generating any missing config files.' % (config_path,))) generate_missing_configs = True config_dict = read_config_files(config_files) if generate_missing_configs: obj.generate_missing_files(config_dict, config_dir_path) return None obj.parse_config_dict(config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path) obj.invoke_all('read_arguments', config_args) return obj
Parse the commandline and config files Supports generation of config files, so is used for the main homeserver app. Returns: Config object, or None if --generate-config or --generate-keys was set
synapse/config/_base.py
load_or_generate_config
jkanefendt/synapse
7
python
@classmethod def load_or_generate_config(cls, description, argv): 'Parse the commandline and config files\n\n Supports generation of config files, so is used for the main homeserver app.\n\n Returns: Config object, or None if --generate-config or --generate-keys was set\n ' parser = argparse.ArgumentParser(description=description) parser.add_argument('-c', '--config-path', action='append', metavar='CONFIG_FILE', help='Specify config file. Can be given multiple times and may specify directories containing *.yaml files.') generate_group = parser.add_argument_group('Config generation') generate_group.add_argument('--generate-config', action='store_true', help='Generate a config file, then exit.') generate_group.add_argument('--generate-missing-configs', '--generate-keys', action='store_true', help='Generate any missing additional config files, then exit.') generate_group.add_argument('-H', '--server-name', help='The server name to generate a config file for.') generate_group.add_argument('--report-stats', action='store', help='Whether the generated config reports anonymized usage statistics.', choices=['yes', 'no']) generate_group.add_argument('--config-directory', '--keys-directory', metavar='DIRECTORY', help='Specify where additional config files such as signing keys and log config should be stored. Defaults to the same directory as the last config file.') generate_group.add_argument('--data-directory', metavar='DIRECTORY', help='Specify where data such as the media store and database file should be stored. Defaults to the current working directory.') generate_group.add_argument('--open-private-ports', action='store_true', help='Leave private ports (such as the non-TLS HTTP listener) open to the internet. 
Do not use this unless you know what you are doing.') cls.invoke_all_static('add_arguments', parser) config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if (not config_files): parser.error('Must supply a config file.\nA config file can be automatically generated using "--generate-config -H SERVER_NAME -c CONFIG-FILE"') if config_args.config_directory: config_dir_path = config_args.config_directory else: config_dir_path = os.path.dirname(config_files[(- 1)]) config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() generate_missing_configs = config_args.generate_missing_configs obj = cls() if config_args.generate_config: if (config_args.report_stats is None): parser.error(('Please specify either --report-stats=yes or --report-stats=no\n\n' + MISSING_REPORT_STATS_SPIEL)) (config_path,) = config_files if (not path_exists(config_path)): print(('Generating config file %s' % (config_path,))) if config_args.data_directory: data_dir_path = config_args.data_directory else: data_dir_path = os.getcwd() data_dir_path = os.path.abspath(data_dir_path) server_name = config_args.server_name if (not server_name): raise ConfigError('Must specify a server_name to a generate config for. Pass -H server.name.') config_str = obj.generate_config(config_dir_path=config_dir_path, data_dir_path=data_dir_path, server_name=server_name, report_stats=(config_args.report_stats == 'yes'), generate_secrets=True, open_private_ports=config_args.open_private_ports) if (not path_exists(config_dir_path)): os.makedirs(config_dir_path) with open(config_path, 'w') as config_file: config_file.write(config_str) config_file.write('\n\n# vim:ft=yaml') config_dict = yaml.safe_load(config_str) obj.generate_missing_files(config_dict, config_dir_path) print(('A config file has been generated in %r for server name %r. Please review this file and customise it to your needs.' 
% (config_path, server_name))) return else: print(('Config file %r already exists. Generating any missing config files.' % (config_path,))) generate_missing_configs = True config_dict = read_config_files(config_files) if generate_missing_configs: obj.generate_missing_files(config_dict, config_dir_path) return None obj.parse_config_dict(config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path) obj.invoke_all('read_arguments', config_args) return obj
@classmethod def load_or_generate_config(cls, description, argv): 'Parse the commandline and config files\n\n Supports generation of config files, so is used for the main homeserver app.\n\n Returns: Config object, or None if --generate-config or --generate-keys was set\n ' parser = argparse.ArgumentParser(description=description) parser.add_argument('-c', '--config-path', action='append', metavar='CONFIG_FILE', help='Specify config file. Can be given multiple times and may specify directories containing *.yaml files.') generate_group = parser.add_argument_group('Config generation') generate_group.add_argument('--generate-config', action='store_true', help='Generate a config file, then exit.') generate_group.add_argument('--generate-missing-configs', '--generate-keys', action='store_true', help='Generate any missing additional config files, then exit.') generate_group.add_argument('-H', '--server-name', help='The server name to generate a config file for.') generate_group.add_argument('--report-stats', action='store', help='Whether the generated config reports anonymized usage statistics.', choices=['yes', 'no']) generate_group.add_argument('--config-directory', '--keys-directory', metavar='DIRECTORY', help='Specify where additional config files such as signing keys and log config should be stored. Defaults to the same directory as the last config file.') generate_group.add_argument('--data-directory', metavar='DIRECTORY', help='Specify where data such as the media store and database file should be stored. Defaults to the current working directory.') generate_group.add_argument('--open-private-ports', action='store_true', help='Leave private ports (such as the non-TLS HTTP listener) open to the internet. 
Do not use this unless you know what you are doing.') cls.invoke_all_static('add_arguments', parser) config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if (not config_files): parser.error('Must supply a config file.\nA config file can be automatically generated using "--generate-config -H SERVER_NAME -c CONFIG-FILE"') if config_args.config_directory: config_dir_path = config_args.config_directory else: config_dir_path = os.path.dirname(config_files[(- 1)]) config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() generate_missing_configs = config_args.generate_missing_configs obj = cls() if config_args.generate_config: if (config_args.report_stats is None): parser.error(('Please specify either --report-stats=yes or --report-stats=no\n\n' + MISSING_REPORT_STATS_SPIEL)) (config_path,) = config_files if (not path_exists(config_path)): print(('Generating config file %s' % (config_path,))) if config_args.data_directory: data_dir_path = config_args.data_directory else: data_dir_path = os.getcwd() data_dir_path = os.path.abspath(data_dir_path) server_name = config_args.server_name if (not server_name): raise ConfigError('Must specify a server_name to a generate config for. Pass -H server.name.') config_str = obj.generate_config(config_dir_path=config_dir_path, data_dir_path=data_dir_path, server_name=server_name, report_stats=(config_args.report_stats == 'yes'), generate_secrets=True, open_private_ports=config_args.open_private_ports) if (not path_exists(config_dir_path)): os.makedirs(config_dir_path) with open(config_path, 'w') as config_file: config_file.write(config_str) config_file.write('\n\n# vim:ft=yaml') config_dict = yaml.safe_load(config_str) obj.generate_missing_files(config_dict, config_dir_path) print(('A config file has been generated in %r for server name %r. Please review this file and customise it to your needs.' 
% (config_path, server_name))) return else: print(('Config file %r already exists. Generating any missing config files.' % (config_path,))) generate_missing_configs = True config_dict = read_config_files(config_files) if generate_missing_configs: obj.generate_missing_files(config_dict, config_dir_path) return None obj.parse_config_dict(config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path) obj.invoke_all('read_arguments', config_args) return obj<|docstring|>Parse the commandline and config files Supports generation of config files, so is used for the main homeserver app. Returns: Config object, or None if --generate-config or --generate-keys was set<|endoftext|>
f007bb0d127244dda217fb07f9f30d46659a5d90a392536ff1ffa1ccbe05a42f
def parse_config_dict(self, config_dict, config_dir_path=None, data_dir_path=None): 'Read the information from the config dict into this Config object.\n\n Args:\n config_dict (dict): Configuration data, as read from the yaml\n\n config_dir_path (str): The path where the config files are kept. Used to\n create filenames for things like the log config and the signing key.\n\n data_dir_path (str): The path where the data files are kept. Used to create\n filenames for things like the database and media store.\n ' self.invoke_all('read_config', config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path)
Read the information from the config dict into this Config object. Args: config_dict (dict): Configuration data, as read from the yaml config_dir_path (str): The path where the config files are kept. Used to create filenames for things like the log config and the signing key. data_dir_path (str): The path where the data files are kept. Used to create filenames for things like the database and media store.
synapse/config/_base.py
parse_config_dict
jkanefendt/synapse
7
python
def parse_config_dict(self, config_dict, config_dir_path=None, data_dir_path=None): 'Read the information from the config dict into this Config object.\n\n Args:\n config_dict (dict): Configuration data, as read from the yaml\n\n config_dir_path (str): The path where the config files are kept. Used to\n create filenames for things like the log config and the signing key.\n\n data_dir_path (str): The path where the data files are kept. Used to create\n filenames for things like the database and media store.\n ' self.invoke_all('read_config', config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path)
def parse_config_dict(self, config_dict, config_dir_path=None, data_dir_path=None): 'Read the information from the config dict into this Config object.\n\n Args:\n config_dict (dict): Configuration data, as read from the yaml\n\n config_dir_path (str): The path where the config files are kept. Used to\n create filenames for things like the log config and the signing key.\n\n data_dir_path (str): The path where the data files are kept. Used to create\n filenames for things like the database and media store.\n ' self.invoke_all('read_config', config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path)<|docstring|>Read the information from the config dict into this Config object. Args: config_dict (dict): Configuration data, as read from the yaml config_dir_path (str): The path where the config files are kept. Used to create filenames for things like the log config and the signing key. data_dir_path (str): The path where the data files are kept. Used to create filenames for things like the database and media store.<|endoftext|>
1143d0e0f2a7ad4b6b72de0e3331e8cd4e71bb9d71d21003a91924a9895691b1
def should_handle(self, instance_name: str, key: str) -> bool: 'Whether this instance is responsible for handling the given key.' if (not self.instances): return False return (self._get_instance(key) == instance_name)
Whether this instance is responsible for handling the given key.
synapse/config/_base.py
should_handle
jkanefendt/synapse
7
python
def should_handle(self, instance_name: str, key: str) -> bool: if (not self.instances): return False return (self._get_instance(key) == instance_name)
def should_handle(self, instance_name: str, key: str) -> bool: if (not self.instances): return False return (self._get_instance(key) == instance_name)<|docstring|>Whether this instance is responsible for handling the given key.<|endoftext|>
2da644953ba611132cc197437e821828ca92e32315e5837fb124bb4c2f18744d
def _get_instance(self, key: str) -> str: "Get the instance responsible for handling the given key.\n\n Note: For federation sending and pushers the config for which instance\n is sending is known only to the sender instance, so we don't expose this\n method by default.\n " if (not self.instances): raise Exception('Unknown worker') if (len(self.instances) == 1): return self.instances[0] dest_hash = sha256(key.encode('utf8')).digest() dest_int = int.from_bytes(dest_hash, byteorder='little') remainder = (dest_int % len(self.instances)) return self.instances[remainder]
Get the instance responsible for handling the given key. Note: For federation sending and pushers the config for which instance is sending is known only to the sender instance, so we don't expose this method by default.
synapse/config/_base.py
_get_instance
jkanefendt/synapse
7
python
def _get_instance(self, key: str) -> str: "Get the instance responsible for handling the given key.\n\n Note: For federation sending and pushers the config for which instance\n is sending is known only to the sender instance, so we don't expose this\n method by default.\n " if (not self.instances): raise Exception('Unknown worker') if (len(self.instances) == 1): return self.instances[0] dest_hash = sha256(key.encode('utf8')).digest() dest_int = int.from_bytes(dest_hash, byteorder='little') remainder = (dest_int % len(self.instances)) return self.instances[remainder]
def _get_instance(self, key: str) -> str: "Get the instance responsible for handling the given key.\n\n Note: For federation sending and pushers the config for which instance\n is sending is known only to the sender instance, so we don't expose this\n method by default.\n " if (not self.instances): raise Exception('Unknown worker') if (len(self.instances) == 1): return self.instances[0] dest_hash = sha256(key.encode('utf8')).digest() dest_int = int.from_bytes(dest_hash, byteorder='little') remainder = (dest_int % len(self.instances)) return self.instances[remainder]<|docstring|>Get the instance responsible for handling the given key. Note: For federation sending and pushers the config for which instance is sending is known only to the sender instance, so we don't expose this method by default.<|endoftext|>
ba5b468173fccf8a227ef05128688b1972f0e85699af1110135138a5919a03c8
def get_instance(self, key: str) -> str: 'Get the instance responsible for handling the given key.' return self._get_instance(key)
Get the instance responsible for handling the given key.
synapse/config/_base.py
get_instance
jkanefendt/synapse
7
python
def get_instance(self, key: str) -> str: return self._get_instance(key)
def get_instance(self, key: str) -> str: return self._get_instance(key)<|docstring|>Get the instance responsible for handling the given key.<|endoftext|>
a140ca1f5a14c27e19f016a743b38bc5bfdee0cfdd6dcde69223e344ea494fa8
def apply_pot(input_dir, output_dir, forward_properties_csv=None, inverse_properties_csv=None, repetitions=10, run_sequential=True): 'Apply the POT to input_dir and save the results to output_dir.\n\n :param input_dir: input directory to be transformed (recursively)\n :param output_dir: path to the output transformed dir\n :param forward_properties_csv: if not None, properties of the transformed images\n are stored here (including fwd transform time)\n :param inverse_properties_csv: if not None, properties of the reconstructed images\n are stored here (including inverse transform time)\n :param repetitions: number of repetitions used to calculate execution time\n (for both forward and inverse transform)\n :param run_sequential: if True, transformations are run in sequential mode\n (as opposed to parallel) so that time measurements can be taken\n\n :raises AssertionError: if transformation/reconstruction is not lossless\n\n :return: fwd_pot_df, inv_pot_df, i.e., the dataframes obtained with get_df\n for POTVersionTAble and InversePOTVersionTable, respectively.\n ' return abstract_mhdc_transform.apply_transform(input_dir=input_dir, output_dir=output_dir, forward_class=POTVersionTable, inverse_class=InversePOTVersionTable, forward_properties_csv=forward_properties_csv, inverse_properties_csv=inverse_properties_csv, repetitions=repetitions, run_sequential=run_sequential)
Apply the POT to input_dir and save the results to output_dir. :param input_dir: input directory to be transformed (recursively) :param output_dir: path to the output transformed dir :param forward_properties_csv: if not None, properties of the transformed images are stored here (including fwd transform time) :param inverse_properties_csv: if not None, properties of the reconstructed images are stored here (including inverse transform time) :param repetitions: number of repetitions used to calculate execution time (for both forward and inverse transform) :param run_sequential: if True, transformations are run in sequential mode (as opposed to parallel) so that time measurements can be taken :raises AssertionError: if transformation/reconstruction is not lossless :return: fwd_pot_df, inv_pot_df, i.e., the dataframes obtained with get_df for POTVersionTAble and InversePOTVersionTable, respectively.
plugins/plugin_mhdc_transforms/pot.py
apply_pot
OscarMaireles/experiment-notebook
0
python
def apply_pot(input_dir, output_dir, forward_properties_csv=None, inverse_properties_csv=None, repetitions=10, run_sequential=True): 'Apply the POT to input_dir and save the results to output_dir.\n\n :param input_dir: input directory to be transformed (recursively)\n :param output_dir: path to the output transformed dir\n :param forward_properties_csv: if not None, properties of the transformed images\n are stored here (including fwd transform time)\n :param inverse_properties_csv: if not None, properties of the reconstructed images\n are stored here (including inverse transform time)\n :param repetitions: number of repetitions used to calculate execution time\n (for both forward and inverse transform)\n :param run_sequential: if True, transformations are run in sequential mode\n (as opposed to parallel) so that time measurements can be taken\n\n :raises AssertionError: if transformation/reconstruction is not lossless\n\n :return: fwd_pot_df, inv_pot_df, i.e., the dataframes obtained with get_df\n for POTVersionTAble and InversePOTVersionTable, respectively.\n ' return abstract_mhdc_transform.apply_transform(input_dir=input_dir, output_dir=output_dir, forward_class=POTVersionTable, inverse_class=InversePOTVersionTable, forward_properties_csv=forward_properties_csv, inverse_properties_csv=inverse_properties_csv, repetitions=repetitions, run_sequential=run_sequential)
def apply_pot(input_dir, output_dir, forward_properties_csv=None, inverse_properties_csv=None, repetitions=10, run_sequential=True): 'Apply the POT to input_dir and save the results to output_dir.\n\n :param input_dir: input directory to be transformed (recursively)\n :param output_dir: path to the output transformed dir\n :param forward_properties_csv: if not None, properties of the transformed images\n are stored here (including fwd transform time)\n :param inverse_properties_csv: if not None, properties of the reconstructed images\n are stored here (including inverse transform time)\n :param repetitions: number of repetitions used to calculate execution time\n (for both forward and inverse transform)\n :param run_sequential: if True, transformations are run in sequential mode\n (as opposed to parallel) so that time measurements can be taken\n\n :raises AssertionError: if transformation/reconstruction is not lossless\n\n :return: fwd_pot_df, inv_pot_df, i.e., the dataframes obtained with get_df\n for POTVersionTAble and InversePOTVersionTable, respectively.\n ' return abstract_mhdc_transform.apply_transform(input_dir=input_dir, output_dir=output_dir, forward_class=POTVersionTable, inverse_class=InversePOTVersionTable, forward_properties_csv=forward_properties_csv, inverse_properties_csv=inverse_properties_csv, repetitions=repetitions, run_sequential=run_sequential)<|docstring|>Apply the POT to input_dir and save the results to output_dir. 
:param input_dir: input directory to be transformed (recursively) :param output_dir: path to the output transformed dir :param forward_properties_csv: if not None, properties of the transformed images are stored here (including fwd transform time) :param inverse_properties_csv: if not None, properties of the reconstructed images are stored here (including inverse transform time) :param repetitions: number of repetitions used to calculate execution time (for both forward and inverse transform) :param run_sequential: if True, transformations are run in sequential mode (as opposed to parallel) so that time measurements can be taken :raises AssertionError: if transformation/reconstruction is not lossless :return: fwd_pot_df, inv_pot_df, i.e., the dataframes obtained with get_df for POTVersionTAble and InversePOTVersionTable, respectively.<|endoftext|>
b1b6f27ba47fdcd0ddbbfe6da36e1f15bd1d6b7269a34da17e42d049b3d60c2f
def register_hooks(self, hooks): '\n Register hooks to the feature getter. The hooks are executed in the order\n they are registered.\n\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n ' hooks = [h for h in hooks if (h is not None)] for h in hooks: assert isinstance(h, HookBase) h.trainer = weakref.proxy(self) self._hooks.extend(hooks)
Register hooks to the feature getter. The hooks are executed in the order they are registered. Args: hooks (list[Optional[HookBase]]): list of hooks
liuy/utils/LiuyFeatureGetter.py
register_hooks
liuy-61/detectron2_origin_liuy
2
python
def register_hooks(self, hooks): '\n Register hooks to the feature getter. The hooks are executed in the order\n they are registered.\n\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n ' hooks = [h for h in hooks if (h is not None)] for h in hooks: assert isinstance(h, HookBase) h.trainer = weakref.proxy(self) self._hooks.extend(hooks)
def register_hooks(self, hooks): '\n Register hooks to the feature getter. The hooks are executed in the order\n they are registered.\n\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n ' hooks = [h for h in hooks if (h is not None)] for h in hooks: assert isinstance(h, HookBase) h.trainer = weakref.proxy(self) self._hooks.extend(hooks)<|docstring|>Register hooks to the feature getter. The hooks are executed in the order they are registered. Args: hooks (list[Optional[HookBase]]): list of hooks<|endoftext|>
6827c379e9f868ea509d8c140d61487d8395c212176fb7654374fafed7d99259
def save_feature(self, start_iter: int, max_iter: int, project_id=None, selected_image_file=None): "\n Args:\n start_iter, max_iter (int): See docs above\n return a list of dict, dict :{'image_id':int, 'feature_tensor':tensor}\n to avoid out of memory, we save the feature_list as file,\n and the project_id is used to get the path where to save,\n selected_image_file : a list of image id, it is used to split the selected images' mask feature\n and unselected images' mask feature\n\n " logger = logging.getLogger(__name__) logger.info('Starting getting feature from iteration {}'.format(start_iter)) selected_feature_list = [] unselected_feature_list = [] selected_serial_number = 0 unselected_serial_number = 0 self.iter = self.start_iter = start_iter self.max_iter = max_iter with EventStorage(start_iter) as self.storage: try: self.before_train() for self.iter in range(start_iter, max_iter): self.before_step() '\n in self.run step do what is diff\n ' len_slected_feature = 20 feature = self.run_step() if ((project_id is not None) and (selected_image_file is not None)): if (feature['image_id'] in selected_image_file): selected_feature_list.append(feature) if (len(selected_feature_list) == len_slected_feature): save_mask_feature(project_id=project_id, mask_feature=selected_feature_list, serial_number=selected_serial_number, selected_or_not=True) selected_serial_number += 1 num = ((selected_serial_number * len_slected_feature) + (unselected_serial_number * len_slected_feature)) print("save {} images' mask feature, still need {} images' feature to save ".format(num, (self.max_iter - num))) del selected_feature_list selected_feature_list = [] else: unselected_feature_list.append(feature) if (len(unselected_feature_list) == len_slected_feature): save_mask_feature(project_id=project_id, mask_feature=unselected_feature_list, serial_number=unselected_serial_number, selected_or_not=False) unselected_serial_number += 1 num = ((selected_serial_number * len_slected_feature) + 
(unselected_serial_number * len_slected_feature)) print("save {} images' mask feature, still need {} images' feature to save ".format(num, (self.max_iter - num))) del unselected_feature_list unselected_feature_list = [] self.after_step() finally: self.after_train() if (len(selected_feature_list) > 0): save_mask_feature(project_id=project_id, mask_feature=selected_feature_list, serial_number=selected_serial_number, selected_or_not=True) selected_serial_number += 1 if (len(unselected_feature_list) > 0): save_mask_feature(project_id=project_id, mask_feature=unselected_feature_list, serial_number=unselected_serial_number, selected_or_not=False) unselected_serial_number += 1
Args: start_iter, max_iter (int): See docs above return a list of dict, dict :{'image_id':int, 'feature_tensor':tensor} to avoid out of memory, we save the feature_list as file, and the project_id is used to get the path where to save, selected_image_file : a list of image id, it is used to split the selected images' mask feature and unselected images' mask feature
liuy/utils/LiuyFeatureGetter.py
save_feature
liuy-61/detectron2_origin_liuy
2
python
def save_feature(self, start_iter: int, max_iter: int, project_id=None, selected_image_file=None): "\n Args:\n start_iter, max_iter (int): See docs above\n return a list of dict, dict :{'image_id':int, 'feature_tensor':tensor}\n to avoid out of memory, we save the feature_list as file,\n and the project_id is used to get the path where to save,\n selected_image_file : a list of image id, it is used to split the selected images' mask feature\n and unselected images' mask feature\n\n " logger = logging.getLogger(__name__) logger.info('Starting getting feature from iteration {}'.format(start_iter)) selected_feature_list = [] unselected_feature_list = [] selected_serial_number = 0 unselected_serial_number = 0 self.iter = self.start_iter = start_iter self.max_iter = max_iter with EventStorage(start_iter) as self.storage: try: self.before_train() for self.iter in range(start_iter, max_iter): self.before_step() '\n in self.run step do what is diff\n ' len_slected_feature = 20 feature = self.run_step() if ((project_id is not None) and (selected_image_file is not None)): if (feature['image_id'] in selected_image_file): selected_feature_list.append(feature) if (len(selected_feature_list) == len_slected_feature): save_mask_feature(project_id=project_id, mask_feature=selected_feature_list, serial_number=selected_serial_number, selected_or_not=True) selected_serial_number += 1 num = ((selected_serial_number * len_slected_feature) + (unselected_serial_number * len_slected_feature)) print("save {} images' mask feature, still need {} images' feature to save ".format(num, (self.max_iter - num))) del selected_feature_list selected_feature_list = [] else: unselected_feature_list.append(feature) if (len(unselected_feature_list) == len_slected_feature): save_mask_feature(project_id=project_id, mask_feature=unselected_feature_list, serial_number=unselected_serial_number, selected_or_not=False) unselected_serial_number += 1 num = ((selected_serial_number * len_slected_feature) + 
(unselected_serial_number * len_slected_feature)) print("save {} images' mask feature, still need {} images' feature to save ".format(num, (self.max_iter - num))) del unselected_feature_list unselected_feature_list = [] self.after_step() finally: self.after_train() if (len(selected_feature_list) > 0): save_mask_feature(project_id=project_id, mask_feature=selected_feature_list, serial_number=selected_serial_number, selected_or_not=True) selected_serial_number += 1 if (len(unselected_feature_list) > 0): save_mask_feature(project_id=project_id, mask_feature=unselected_feature_list, serial_number=unselected_serial_number, selected_or_not=False) unselected_serial_number += 1
def save_feature(self, start_iter: int, max_iter: int, project_id=None, selected_image_file=None): "\n Args:\n start_iter, max_iter (int): See docs above\n return a list of dict, dict :{'image_id':int, 'feature_tensor':tensor}\n to avoid out of memory, we save the feature_list as file,\n and the project_id is used to get the path where to save,\n selected_image_file : a list of image id, it is used to split the selected images' mask feature\n and unselected images' mask feature\n\n " logger = logging.getLogger(__name__) logger.info('Starting getting feature from iteration {}'.format(start_iter)) selected_feature_list = [] unselected_feature_list = [] selected_serial_number = 0 unselected_serial_number = 0 self.iter = self.start_iter = start_iter self.max_iter = max_iter with EventStorage(start_iter) as self.storage: try: self.before_train() for self.iter in range(start_iter, max_iter): self.before_step() '\n in self.run step do what is diff\n ' len_slected_feature = 20 feature = self.run_step() if ((project_id is not None) and (selected_image_file is not None)): if (feature['image_id'] in selected_image_file): selected_feature_list.append(feature) if (len(selected_feature_list) == len_slected_feature): save_mask_feature(project_id=project_id, mask_feature=selected_feature_list, serial_number=selected_serial_number, selected_or_not=True) selected_serial_number += 1 num = ((selected_serial_number * len_slected_feature) + (unselected_serial_number * len_slected_feature)) print("save {} images' mask feature, still need {} images' feature to save ".format(num, (self.max_iter - num))) del selected_feature_list selected_feature_list = [] else: unselected_feature_list.append(feature) if (len(unselected_feature_list) == len_slected_feature): save_mask_feature(project_id=project_id, mask_feature=unselected_feature_list, serial_number=unselected_serial_number, selected_or_not=False) unselected_serial_number += 1 num = ((selected_serial_number * len_slected_feature) + 
(unselected_serial_number * len_slected_feature)) print("save {} images' mask feature, still need {} images' feature to save ".format(num, (self.max_iter - num))) del unselected_feature_list unselected_feature_list = [] self.after_step() finally: self.after_train() if (len(selected_feature_list) > 0): save_mask_feature(project_id=project_id, mask_feature=selected_feature_list, serial_number=selected_serial_number, selected_or_not=True) selected_serial_number += 1 if (len(unselected_feature_list) > 0): save_mask_feature(project_id=project_id, mask_feature=unselected_feature_list, serial_number=unselected_serial_number, selected_or_not=False) unselected_serial_number += 1<|docstring|>Args: start_iter, max_iter (int): See docs above return a list of dict, dict :{'image_id':int, 'feature_tensor':tensor} to avoid out of memory, we save the feature_list as file, and the project_id is used to get the path where to save, selected_image_file : a list of image id, it is used to split the selected images' mask feature and unselected images' mask feature<|endoftext|>
b34bfcf94aefb2a977bdba84068d662ade5fb03eda9696aac9fcff46dc1e6ec9
def __init__(self, model, data_loader): '\n Args:\n model: a torch Module. Takes a data from data_loader and returns a\n dict of losses.\n data_loader: an iterable. Contains data to be used to call model.\n\n ' super().__init__() "\n We set the model to training mode in the feature getter.\n but we do not optimize the model\n However it's valid to train a model that's in eval mode.\n If you want your model (or a submodule of it) to behave\n like evaluation during training, you can overwrite its train() method.\n " model.train() self.model = model self.data_loader = data_loader self._data_loader_iter = iter(data_loader)
Args: model: a torch Module. Takes a data from data_loader and returns a dict of losses. data_loader: an iterable. Contains data to be used to call model.
liuy/utils/LiuyFeatureGetter.py
__init__
liuy-61/detectron2_origin_liuy
2
python
def __init__(self, model, data_loader): '\n Args:\n model: a torch Module. Takes a data from data_loader and returns a\n dict of losses.\n data_loader: an iterable. Contains data to be used to call model.\n\n ' super().__init__() "\n We set the model to training mode in the feature getter.\n but we do not optimize the model\n However it's valid to train a model that's in eval mode.\n If you want your model (or a submodule of it) to behave\n like evaluation during training, you can overwrite its train() method.\n " model.train() self.model = model self.data_loader = data_loader self._data_loader_iter = iter(data_loader)
def __init__(self, model, data_loader): '\n Args:\n model: a torch Module. Takes a data from data_loader and returns a\n dict of losses.\n data_loader: an iterable. Contains data to be used to call model.\n\n ' super().__init__() "\n We set the model to training mode in the feature getter.\n but we do not optimize the model\n However it's valid to train a model that's in eval mode.\n If you want your model (or a submodule of it) to behave\n like evaluation during training, you can overwrite its train() method.\n " model.train() self.model = model self.data_loader = data_loader self._data_loader_iter = iter(data_loader)<|docstring|>Args: model: a torch Module. Takes a data from data_loader and returns a dict of losses. data_loader: an iterable. Contains data to be used to call model.<|endoftext|>
c3461ab5855bb55d3b32c03790f727c7e17f1dc1ae46b2b3e881b585c9b3cde5
def run_step(self): '\n Implement the standard getting feature logic described above.\n ' start = time.perf_counter() "\n If your want to do something with the data, you can wrap the data loader.\n return a dict 'image_id' is the feature's corresponding image' index\n " data = next(self._data_loader_iter) with torch.no_grad(): '\n If your want to do something with the losses, you can wrap the model.\n ' assert (len(data) == 1), 'batch_size is not 1' feature_dict = {'image_id': data[0]['image_id'], 'feature_tensor': self.model.get_mask_feature(data)} return feature_dict
Implement the standard getting feature logic described above.
liuy/utils/LiuyFeatureGetter.py
run_step
liuy-61/detectron2_origin_liuy
2
python
def run_step(self): '\n \n ' start = time.perf_counter() "\n If your want to do something with the data, you can wrap the data loader.\n return a dict 'image_id' is the feature's corresponding image' index\n " data = next(self._data_loader_iter) with torch.no_grad(): '\n If your want to do something with the losses, you can wrap the model.\n ' assert (len(data) == 1), 'batch_size is not 1' feature_dict = {'image_id': data[0]['image_id'], 'feature_tensor': self.model.get_mask_feature(data)} return feature_dict
def run_step(self): '\n \n ' start = time.perf_counter() "\n If your want to do something with the data, you can wrap the data loader.\n return a dict 'image_id' is the feature's corresponding image' index\n " data = next(self._data_loader_iter) with torch.no_grad(): '\n If your want to do something with the losses, you can wrap the model.\n ' assert (len(data) == 1), 'batch_size is not 1' feature_dict = {'image_id': data[0]['image_id'], 'feature_tensor': self.model.get_mask_feature(data)} return feature_dict<|docstring|>Implement the standard getting feature logic described above.<|endoftext|>
23dd37ffa50edc065ce266ed7f7a25a02fc9ff8696769e1a80881009d679026a
def build_hooks(self): '\n Build a list of default hooks, including timing, evaluation,\n checkpointing, lr scheduling, precise BN, writing events.\n\n Returns:\n list[HookBase]:\n ' cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 ret = [hooks.IterationTimer()] def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results return ret
Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]:
liuy/utils/LiuyFeatureGetter.py
build_hooks
liuy-61/detectron2_origin_liuy
2
python
def build_hooks(self): '\n Build a list of default hooks, including timing, evaluation,\n checkpointing, lr scheduling, precise BN, writing events.\n\n Returns:\n list[HookBase]:\n ' cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 ret = [hooks.IterationTimer()] def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results return ret
def build_hooks(self): '\n Build a list of default hooks, including timing, evaluation,\n checkpointing, lr scheduling, precise BN, writing events.\n\n Returns:\n list[HookBase]:\n ' cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 ret = [hooks.IterationTimer()] def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results return ret<|docstring|>Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]:<|endoftext|>
56e24d031ff233aaecef030e314a25932243910c45334adf2c23e06226b65494
def save_feature(self, project_id=None, selected_image_file=None): '\n project_id: to avoid out of memory we save the feature part by part ,\n and the project_id is used to compute the path to save\n Returns:\n OrderedDict of results,\n ' feature = super().save_feature(self.start_iter, self.max_iter, project_id=project_id, selected_image_file=selected_image_file) return feature
project_id: to avoid out of memory we save the feature part by part , and the project_id is used to compute the path to save Returns: OrderedDict of results,
liuy/utils/LiuyFeatureGetter.py
save_feature
liuy-61/detectron2_origin_liuy
2
python
def save_feature(self, project_id=None, selected_image_file=None): '\n project_id: to avoid out of memory we save the feature part by part ,\n and the project_id is used to compute the path to save\n Returns:\n OrderedDict of results,\n ' feature = super().save_feature(self.start_iter, self.max_iter, project_id=project_id, selected_image_file=selected_image_file) return feature
def save_feature(self, project_id=None, selected_image_file=None): '\n project_id: to avoid out of memory we save the feature part by part ,\n and the project_id is used to compute the path to save\n Returns:\n OrderedDict of results,\n ' feature = super().save_feature(self.start_iter, self.max_iter, project_id=project_id, selected_image_file=selected_image_file) return feature<|docstring|>project_id: to avoid out of memory we save the feature part by part , and the project_id is used to compute the path to save Returns: OrderedDict of results,<|endoftext|>
7588cd063bb92633c599ba76dac0bc0fe1c4481dc947604d563ca6a5ed816240
@classmethod def build_model(cls, cfg): "\n Returns:\n torch.nn.Module:\n\n It now calls :func:`detectron2.modeling.build_model`.\n Overwrite it if you'd like a different model.\n " model = build_model(cfg) logger = logging.getLogger(__name__) logger.info('Model:\n{}'.format(model)) return model
Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model.
liuy/utils/LiuyFeatureGetter.py
build_model
liuy-61/detectron2_origin_liuy
2
python
@classmethod def build_model(cls, cfg): "\n Returns:\n torch.nn.Module:\n\n It now calls :func:`detectron2.modeling.build_model`.\n Overwrite it if you'd like a different model.\n " model = build_model(cfg) logger = logging.getLogger(__name__) logger.info('Model:\n{}'.format(model)) return model
@classmethod def build_model(cls, cfg): "\n Returns:\n torch.nn.Module:\n\n It now calls :func:`detectron2.modeling.build_model`.\n Overwrite it if you'd like a different model.\n " model = build_model(cfg) logger = logging.getLogger(__name__) logger.info('Model:\n{}'.format(model)) return model<|docstring|>Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model.<|endoftext|>
7505625d2e711bfd35b4582397bd386619671fa3e63bc3fc716767c20e69f5ce
@classmethod def build_train_loader(cls, cfg): "\n Returns:\n iterable\n\n It now calls :func:`detectron2.data.build_detection_train_loader`.\n Overwrite it if you'd like a different data loader.\n " return build_detection_train_loader(cfg)
Returns: iterable It now calls :func:`detectron2.data.build_detection_train_loader`. Overwrite it if you'd like a different data loader.
liuy/utils/LiuyFeatureGetter.py
build_train_loader
liuy-61/detectron2_origin_liuy
2
python
@classmethod def build_train_loader(cls, cfg): "\n Returns:\n iterable\n\n It now calls :func:`detectron2.data.build_detection_train_loader`.\n Overwrite it if you'd like a different data loader.\n " return build_detection_train_loader(cfg)
@classmethod def build_train_loader(cls, cfg): "\n Returns:\n iterable\n\n It now calls :func:`detectron2.data.build_detection_train_loader`.\n Overwrite it if you'd like a different data loader.\n " return build_detection_train_loader(cfg)<|docstring|>Returns: iterable It now calls :func:`detectron2.data.build_detection_train_loader`. Overwrite it if you'd like a different data loader.<|endoftext|>
fba2ff6dd0f0b76f214f137a871d262479f44e9b0b39a90a6efef8d088191496
def formatNode(node): 'Create a string version of the given node' if (node == None): return '' t = type(node) if (t == str): return (("'" + node) + "'") elif ((t == int) or (t == float)): return str(node) elif (t == list): return formatList(node, None) else: return printFunction(node, 0)
Create a string version of the given node
hintgen/display.py
formatNode
krivers/ITAP-django
19
python
def formatNode(node): if (node == None): return t = type(node) if (t == str): return (("'" + node) + "'") elif ((t == int) or (t == float)): return str(node) elif (t == list): return formatList(node, None) else: return printFunction(node, 0)
def formatNode(node): if (node == None): return t = type(node) if (t == str): return (("'" + node) + "'") elif ((t == int) or (t == float)): return str(node) elif (t == list): return formatList(node, None) else: return printFunction(node, 0)<|docstring|>Create a string version of the given node<|endoftext|>
a6d8e6ba670bdc8ae4e4de4a4d572a1ab5dece35530debf91f25d4fc7667f73e
def signature_explorer(prompt=True): '\n\t\tOpen the signature explorer UI.\n\t\t:param prompt: if True, prompt the user to open a file immediately.\n\t\t:return: `App`, a QT window\n\t\t' if (('qt_major_version' in binaryninjaui.__dict__) and (binaryninjaui.qt_major_version == 6)): from PySide6.QtWidgets import QApplication else: from PySide2.QtWidgets import QApplication app = QApplication.instance() global widget widget = sigexplorer.App() if prompt: widget.open_file() widget.show() if app: app.exec_() return widget
Open the signature explorer UI. :param prompt: if True, prompt the user to open a file immediately. :return: `App`, a QT window
sigkit/__init__.py
signature_explorer
RLS-Rylan/sigkit
0
python
def signature_explorer(prompt=True): '\n\t\tOpen the signature explorer UI.\n\t\t:param prompt: if True, prompt the user to open a file immediately.\n\t\t:return: `App`, a QT window\n\t\t' if (('qt_major_version' in binaryninjaui.__dict__) and (binaryninjaui.qt_major_version == 6)): from PySide6.QtWidgets import QApplication else: from PySide2.QtWidgets import QApplication app = QApplication.instance() global widget widget = sigexplorer.App() if prompt: widget.open_file() widget.show() if app: app.exec_() return widget
def signature_explorer(prompt=True): '\n\t\tOpen the signature explorer UI.\n\t\t:param prompt: if True, prompt the user to open a file immediately.\n\t\t:return: `App`, a QT window\n\t\t' if (('qt_major_version' in binaryninjaui.__dict__) and (binaryninjaui.qt_major_version == 6)): from PySide6.QtWidgets import QApplication else: from PySide2.QtWidgets import QApplication app = QApplication.instance() global widget widget = sigexplorer.App() if prompt: widget.open_file() widget.show() if app: app.exec_() return widget<|docstring|>Open the signature explorer UI. :param prompt: if True, prompt the user to open a file immediately. :return: `App`, a QT window<|endoftext|>
416f414e6eb0b92aacf5043e42f2dc5f553543b91898eb103c31f874d10eba0b
def db_describe_dict(data, table_name=None): '\n Helper function prints sqlalchemy table definitions\n using dictionary key and value pairs.\n :param data:\n :return:\n ' if (table_name is None): table_name = 'Table1' m = PY_TYPE_TO_SQL_ALCHEMY_LABEL_MAP others = list() print("__tablename__ = '{}'".format(table_name)) for (k, v) in sorted(data.items()): if isinstance(v, dict): others.append((k, v)) else: print('{} = Column({})'.format(k, m.get(type(v), 'String'))) for (k, v) in others: print("\n\n__tablename__ = '{}'\n".format(k)) db_describe_dict(v) print('\n\n')
Helper function prints sqlalchemy table definitions using dictionary key and value pairs. :param data: :return:
stocklook/utils/database.py
db_describe_dict
zbarge/stocklook
149
python
def db_describe_dict(data, table_name=None): '\n Helper function prints sqlalchemy table definitions\n using dictionary key and value pairs.\n :param data:\n :return:\n ' if (table_name is None): table_name = 'Table1' m = PY_TYPE_TO_SQL_ALCHEMY_LABEL_MAP others = list() print("__tablename__ = '{}'".format(table_name)) for (k, v) in sorted(data.items()): if isinstance(v, dict): others.append((k, v)) else: print('{} = Column({})'.format(k, m.get(type(v), 'String'))) for (k, v) in others: print("\n\n__tablename__ = '{}'\n".format(k)) db_describe_dict(v) print('\n\n')
def db_describe_dict(data, table_name=None): '\n Helper function prints sqlalchemy table definitions\n using dictionary key and value pairs.\n :param data:\n :return:\n ' if (table_name is None): table_name = 'Table1' m = PY_TYPE_TO_SQL_ALCHEMY_LABEL_MAP others = list() print("__tablename__ = '{}'".format(table_name)) for (k, v) in sorted(data.items()): if isinstance(v, dict): others.append((k, v)) else: print('{} = Column({})'.format(k, m.get(type(v), 'String'))) for (k, v) in others: print("\n\n__tablename__ = '{}'\n".format(k)) db_describe_dict(v) print('\n\n')<|docstring|>Helper function prints sqlalchemy table definitions using dictionary key and value pairs. :param data: :return:<|endoftext|>
734720fd3fdaa4fb6f112eee66264591a5a0f541f42984041f799f61d0eff926
def db_get_python_dtypes(sql_table, date_type=None, include_str=False): "\n Returns a dictionary of {column_name: python_dtype} after extracting\n information from :param sql_table.\n\n :param sql_table: (declarative_base object)\n A Sqlalchemy Table object.\n\n :param date_type: (object, callable, default None)\n None defaults to function stocklook.utils.timetools.timestamp_to_local\n\n :param include_str: (bool, default False)\n True includes columns in string format in the returned dictionary.\n Usually the data being parsed is in string format so it's not often needed.\n\n :return: (dict)\n " d = dict() cols = sql_table.__table__.columns if (date_type is None): date_type = timestamp_to_local if (not include_str): cols = [c for c in cols if (c.type.python_type != str)] for c in cols: py_type = c.type.python_type col = c.name if ('date' in str(py_type).lower()): d[col] = date_type else: d[col] = py_type return d
Returns a dictionary of {column_name: python_dtype} after extracting information from :param sql_table. :param sql_table: (declarative_base object) A Sqlalchemy Table object. :param date_type: (object, callable, default None) None defaults to function stocklook.utils.timetools.timestamp_to_local :param include_str: (bool, default False) True includes columns in string format in the returned dictionary. Usually the data being parsed is in string format so it's not often needed. :return: (dict)
stocklook/utils/database.py
db_get_python_dtypes
zbarge/stocklook
149
python
def db_get_python_dtypes(sql_table, date_type=None, include_str=False): "\n Returns a dictionary of {column_name: python_dtype} after extracting\n information from :param sql_table.\n\n :param sql_table: (declarative_base object)\n A Sqlalchemy Table object.\n\n :param date_type: (object, callable, default None)\n None defaults to function stocklook.utils.timetools.timestamp_to_local\n\n :param include_str: (bool, default False)\n True includes columns in string format in the returned dictionary.\n Usually the data being parsed is in string format so it's not often needed.\n\n :return: (dict)\n " d = dict() cols = sql_table.__table__.columns if (date_type is None): date_type = timestamp_to_local if (not include_str): cols = [c for c in cols if (c.type.python_type != str)] for c in cols: py_type = c.type.python_type col = c.name if ('date' in str(py_type).lower()): d[col] = date_type else: d[col] = py_type return d
def db_get_python_dtypes(sql_table, date_type=None, include_str=False): "\n Returns a dictionary of {column_name: python_dtype} after extracting\n information from :param sql_table.\n\n :param sql_table: (declarative_base object)\n A Sqlalchemy Table object.\n\n :param date_type: (object, callable, default None)\n None defaults to function stocklook.utils.timetools.timestamp_to_local\n\n :param include_str: (bool, default False)\n True includes columns in string format in the returned dictionary.\n Usually the data being parsed is in string format so it's not often needed.\n\n :return: (dict)\n " d = dict() cols = sql_table.__table__.columns if (date_type is None): date_type = timestamp_to_local if (not include_str): cols = [c for c in cols if (c.type.python_type != str)] for c in cols: py_type = c.type.python_type col = c.name if ('date' in str(py_type).lower()): d[col] = date_type else: d[col] = py_type return d<|docstring|>Returns a dictionary of {column_name: python_dtype} after extracting information from :param sql_table. :param sql_table: (declarative_base object) A Sqlalchemy Table object. :param date_type: (object, callable, default None) None defaults to function stocklook.utils.timetools.timestamp_to_local :param include_str: (bool, default False) True includes columns in string format in the returned dictionary. Usually the data being parsed is in string format so it's not often needed. :return: (dict)<|endoftext|>
5f0e87d4e5429a0bd7ac9cace267ffdeb40cf929685696b8ff875e5290cc6290
def db_map_dict_to_alchemy_object(d, sql_object, dtype_items=None, raise_on_error=False): '\n Converts a dictionary object\n into a SQLAlchemy object. The dictionary\n keys should exactly match the attribute names\n of the SQLAlchemy object or a warning will be printed\n and missing attributes will be skipped.\n\n :param d:\n :param sql_object:\n :param dtype_items:\n :param raise_on_error:\n :return:\n ' if (dtype_items is not None): for (c, tp) in dtype_items: try: d[c] = tp(d[c]) except KeyError: pass except (ValueError, TypeError): d[c] = None obj = sql_object() for (k, v) in d.items(): try: setattr(obj, k, v) except AttributeError: msg = 'SQL object {} missing: {}'.format(sql_object, k) if raise_on_error: raise KeyError(msg) return obj
Converts a dictionary object into a SQLAlchemy object. The dictionary keys should exactly match the attribute names of the SQLAlchemy object or a warning will be printed and missing attributes will be skipped. :param d: :param sql_object: :param dtype_items: :param raise_on_error: :return:
stocklook/utils/database.py
db_map_dict_to_alchemy_object
zbarge/stocklook
149
python
def db_map_dict_to_alchemy_object(d, sql_object, dtype_items=None, raise_on_error=False): '\n Converts a dictionary object\n into a SQLAlchemy object. The dictionary\n keys should exactly match the attribute names\n of the SQLAlchemy object or a warning will be printed\n and missing attributes will be skipped.\n\n :param d:\n :param sql_object:\n :param dtype_items:\n :param raise_on_error:\n :return:\n ' if (dtype_items is not None): for (c, tp) in dtype_items: try: d[c] = tp(d[c]) except KeyError: pass except (ValueError, TypeError): d[c] = None obj = sql_object() for (k, v) in d.items(): try: setattr(obj, k, v) except AttributeError: msg = 'SQL object {} missing: {}'.format(sql_object, k) if raise_on_error: raise KeyError(msg) return obj
def db_map_dict_to_alchemy_object(d, sql_object, dtype_items=None, raise_on_error=False): '\n Converts a dictionary object\n into a SQLAlchemy object. The dictionary\n keys should exactly match the attribute names\n of the SQLAlchemy object or a warning will be printed\n and missing attributes will be skipped.\n\n :param d:\n :param sql_object:\n :param dtype_items:\n :param raise_on_error:\n :return:\n ' if (dtype_items is not None): for (c, tp) in dtype_items: try: d[c] = tp(d[c]) except KeyError: pass except (ValueError, TypeError): d[c] = None obj = sql_object() for (k, v) in d.items(): try: setattr(obj, k, v) except AttributeError: msg = 'SQL object {} missing: {}'.format(sql_object, k) if raise_on_error: raise KeyError(msg) return obj<|docstring|>Converts a dictionary object into a SQLAlchemy object. The dictionary keys should exactly match the attribute names of the SQLAlchemy object or a warning will be printed and missing attributes will be skipped. :param d: :param sql_object: :param dtype_items: :param raise_on_error: :return:<|endoftext|>
8f7bd8707c07d2086dbf78595a2874acd42f39326cfd1588220c705dda58bd0d
def _setup(self): '\n Loads DatabaseLoadingThread.dtypes & DatabaseLoadingThread.dtype_items\n using python data types from the SQLAlchemy table.\n :return:\n ' d = self.dtypes cols = self.obj.__table__.columns for c in cols: py_type = c.type.python_type col = c.name if (py_type == str): continue elif ('date' in str(py_type).lower()): d[col] = timestamp_to_local else: d[col] = py_type self.dtype_items = d.items()
Loads DatabaseLoadingThread.dtypes & DatabaseLoadingThread.dtype_items using python data types from the SQLAlchemy table. :return:
stocklook/utils/database.py
_setup
zbarge/stocklook
149
python
def _setup(self): '\n Loads DatabaseLoadingThread.dtypes & DatabaseLoadingThread.dtype_items\n using python data types from the SQLAlchemy table.\n :return:\n ' d = self.dtypes cols = self.obj.__table__.columns for c in cols: py_type = c.type.python_type col = c.name if (py_type == str): continue elif ('date' in str(py_type).lower()): d[col] = timestamp_to_local else: d[col] = py_type self.dtype_items = d.items()
def _setup(self): '\n Loads DatabaseLoadingThread.dtypes & DatabaseLoadingThread.dtype_items\n using python data types from the SQLAlchemy table.\n :return:\n ' d = self.dtypes cols = self.obj.__table__.columns for c in cols: py_type = c.type.python_type col = c.name if (py_type == str): continue elif ('date' in str(py_type).lower()): d[col] = timestamp_to_local else: d[col] = py_type self.dtype_items = d.items()<|docstring|>Loads DatabaseLoadingThread.dtypes & DatabaseLoadingThread.dtype_items using python data types from the SQLAlchemy table. :return:<|endoftext|>
41190b0f890e71ab554327648a871b55ff90a832dfd4d1fd5517a4c41afb98ea
def get_sql_record(self, d): '\n Converts a dictionary object\n into a SQLAlchemy object. The dictionary\n keys should exactly match the attribute names\n of the SQLAlchemy object or a warning will be printed\n and missing attributes will be skipped.\n :param d:\n :return:\n ' return db_map_dict_to_alchemy_object(d, self.obj, dtype_items=self.dtype_items, raise_on_error=self.raise_on_error)
Converts a dictionary object into a SQLAlchemy object. The dictionary keys should exactly match the attribute names of the SQLAlchemy object or a warning will be printed and missing attributes will be skipped. :param d: :return:
stocklook/utils/database.py
get_sql_record
zbarge/stocklook
149
python
def get_sql_record(self, d): '\n Converts a dictionary object\n into a SQLAlchemy object. The dictionary\n keys should exactly match the attribute names\n of the SQLAlchemy object or a warning will be printed\n and missing attributes will be skipped.\n :param d:\n :return:\n ' return db_map_dict_to_alchemy_object(d, self.obj, dtype_items=self.dtype_items, raise_on_error=self.raise_on_error)
def get_sql_record(self, d): '\n Converts a dictionary object\n into a SQLAlchemy object. The dictionary\n keys should exactly match the attribute names\n of the SQLAlchemy object or a warning will be printed\n and missing attributes will be skipped.\n :param d:\n :return:\n ' return db_map_dict_to_alchemy_object(d, self.obj, dtype_items=self.dtype_items, raise_on_error=self.raise_on_error)<|docstring|>Converts a dictionary object into a SQLAlchemy object. The dictionary keys should exactly match the attribute names of the SQLAlchemy object or a warning will be printed and missing attributes will be skipped. :param d: :return:<|endoftext|>
34cdc2b248531b5e3d7557e18db7aa8cc595ee8eceebc847e357509a2f45d0c7
def load_messages(self): '\n Retrieves a message (dict) from the DatabaseLoadingThread.queue\n Parses message into SQLAlchemy object\n Loads SQLAlchemy object into database\n Commits updates based on DatabaseLoadingThread.commit_interval\n Closes session and returns the last message processed\n :return:\n ' session = self.get_session() msg = None while True: try: msg = self.queue.get(timeout=1) if (not hasattr(msg, 'items')): if (msg == self.STOP_SIGNAL): break else: err_msg = "Got unexpected message: '{}'.\n Expecting dictionary-like messages that have .items()".format(msg) if self.raise_on_error: raise AttributeError(err_msg) else: logger.error(err_msg) continue rec = self.get_sql_record(msg) session.add(rec) self.count += 1 done = ((self.count % self.commit_interval) == 0) if done: break except Empty: break session.commit() session.close() return msg
Retrieves a message (dict) from the DatabaseLoadingThread.queue Parses message into SQLAlchemy object Loads SQLAlchemy object into database Commits updates based on DatabaseLoadingThread.commit_interval Closes session and returns the last message processed :return:
stocklook/utils/database.py
load_messages
zbarge/stocklook
149
python
def load_messages(self): '\n Retrieves a message (dict) from the DatabaseLoadingThread.queue\n Parses message into SQLAlchemy object\n Loads SQLAlchemy object into database\n Commits updates based on DatabaseLoadingThread.commit_interval\n Closes session and returns the last message processed\n :return:\n ' session = self.get_session() msg = None while True: try: msg = self.queue.get(timeout=1) if (not hasattr(msg, 'items')): if (msg == self.STOP_SIGNAL): break else: err_msg = "Got unexpected message: '{}'.\n Expecting dictionary-like messages that have .items()".format(msg) if self.raise_on_error: raise AttributeError(err_msg) else: logger.error(err_msg) continue rec = self.get_sql_record(msg) session.add(rec) self.count += 1 done = ((self.count % self.commit_interval) == 0) if done: break except Empty: break session.commit() session.close() return msg
def load_messages(self): '\n Retrieves a message (dict) from the DatabaseLoadingThread.queue\n Parses message into SQLAlchemy object\n Loads SQLAlchemy object into database\n Commits updates based on DatabaseLoadingThread.commit_interval\n Closes session and returns the last message processed\n :return:\n ' session = self.get_session() msg = None while True: try: msg = self.queue.get(timeout=1) if (not hasattr(msg, 'items')): if (msg == self.STOP_SIGNAL): break else: err_msg = "Got unexpected message: '{}'.\n Expecting dictionary-like messages that have .items()".format(msg) if self.raise_on_error: raise AttributeError(err_msg) else: logger.error(err_msg) continue rec = self.get_sql_record(msg) session.add(rec) self.count += 1 done = ((self.count % self.commit_interval) == 0) if done: break except Empty: break session.commit() session.close() return msg<|docstring|>Retrieves a message (dict) from the DatabaseLoadingThread.queue Parses message into SQLAlchemy object Loads SQLAlchemy object into database Commits updates based on DatabaseLoadingThread.commit_interval Closes session and returns the last message processed :return:<|endoftext|>
c115332d4aba264e7dba60b56ef8e488fc4822e4e3021eb6ebed5e4b5c68fe2a
@property def engine(self): "\n SQLAlchemy engine defaults to classname.sqlite3\n in the directory found in stocklook.config.config['DATA_DIRECTORY']\n :return:\n " if (self._engine is None): from stocklook.config import config, DATA_DIRECTORY db_name = '{}.sqlite3'.format(self.__class__.__name__.lower()) db_path = ('sqlite:///' + os.path.join(config[DATA_DIRECTORY], db_name)) self._engine = create_engine(db_path) if (self._declarative_base is not None): self._declarative_base.metadata.create_all(bind=self._engine) return self._engine
SQLAlchemy engine defaults to classname.sqlite3 in the directory found in stocklook.config.config['DATA_DIRECTORY'] :return:
stocklook/utils/database.py
engine
zbarge/stocklook
149
python
@property def engine(self): "\n SQLAlchemy engine defaults to classname.sqlite3\n in the directory found in stocklook.config.config['DATA_DIRECTORY']\n :return:\n " if (self._engine is None): from stocklook.config import config, DATA_DIRECTORY db_name = '{}.sqlite3'.format(self.__class__.__name__.lower()) db_path = ('sqlite:///' + os.path.join(config[DATA_DIRECTORY], db_name)) self._engine = create_engine(db_path) if (self._declarative_base is not None): self._declarative_base.metadata.create_all(bind=self._engine) return self._engine
@property def engine(self): "\n SQLAlchemy engine defaults to classname.sqlite3\n in the directory found in stocklook.config.config['DATA_DIRECTORY']\n :return:\n " if (self._engine is None): from stocklook.config import config, DATA_DIRECTORY db_name = '{}.sqlite3'.format(self.__class__.__name__.lower()) db_path = ('sqlite:///' + os.path.join(config[DATA_DIRECTORY], db_name)) self._engine = create_engine(db_path) if (self._declarative_base is not None): self._declarative_base.metadata.create_all(bind=self._engine) return self._engine<|docstring|>SQLAlchemy engine defaults to classname.sqlite3 in the directory found in stocklook.config.config['DATA_DIRECTORY'] :return:<|endoftext|>
18619f716d4a3a0fd94501aa9a864ba59e95a266312da0158fce088b247b1447
def dollar_format(suffix=''): "Dollar formatter for matplotlib.\n\n :param suffix: Suffix to append, e.g. 'B'. Defaults to ''.\n :returns: FuncFormatter.\n\n " return currency_format(currency='USD', suffix=suffix)
Dollar formatter for matplotlib. :param suffix: Suffix to append, e.g. 'B'. Defaults to ''. :returns: FuncFormatter.
microdf/chart_utils.py
dollar_format
MaxGhenis/microdf
6
python
def dollar_format(suffix=): "Dollar formatter for matplotlib.\n\n :param suffix: Suffix to append, e.g. 'B'. Defaults to .\n :returns: FuncFormatter.\n\n " return currency_format(currency='USD', suffix=suffix)
def dollar_format(suffix=): "Dollar formatter for matplotlib.\n\n :param suffix: Suffix to append, e.g. 'B'. Defaults to .\n :returns: FuncFormatter.\n\n " return currency_format(currency='USD', suffix=suffix)<|docstring|>Dollar formatter for matplotlib. :param suffix: Suffix to append, e.g. 'B'. Defaults to ''. :returns: FuncFormatter.<|endoftext|>
45a958d489aaf12c7c7e138ccf3763c98a1152966544b9e70dd4e1c418b2a8f1
def currency_format(currency='USD', suffix=''): "Currency formatter for matplotlib.\n\n :param currency: Name of the currency, e.g. 'USD', 'GBP'.\n :param suffix: Suffix to append, e.g. 'B'. Defaults to ''.\n :returns: FuncFormatter.\n\n " prefix = {'USD': '$', 'GBP': '£'}[currency] return mpl.ticker.FuncFormatter((lambda x, _: ((prefix + format(int(x), ',')) + suffix)))
Currency formatter for matplotlib. :param currency: Name of the currency, e.g. 'USD', 'GBP'. :param suffix: Suffix to append, e.g. 'B'. Defaults to ''. :returns: FuncFormatter.
microdf/chart_utils.py
currency_format
MaxGhenis/microdf
6
python
def currency_format(currency='USD', suffix=): "Currency formatter for matplotlib.\n\n :param currency: Name of the currency, e.g. 'USD', 'GBP'.\n :param suffix: Suffix to append, e.g. 'B'. Defaults to .\n :returns: FuncFormatter.\n\n " prefix = {'USD': '$', 'GBP': '£'}[currency] return mpl.ticker.FuncFormatter((lambda x, _: ((prefix + format(int(x), ',')) + suffix)))
def currency_format(currency='USD', suffix=): "Currency formatter for matplotlib.\n\n :param currency: Name of the currency, e.g. 'USD', 'GBP'.\n :param suffix: Suffix to append, e.g. 'B'. Defaults to .\n :returns: FuncFormatter.\n\n " prefix = {'USD': '$', 'GBP': '£'}[currency] return mpl.ticker.FuncFormatter((lambda x, _: ((prefix + format(int(x), ',')) + suffix)))<|docstring|>Currency formatter for matplotlib. :param currency: Name of the currency, e.g. 'USD', 'GBP'. :param suffix: Suffix to append, e.g. 'B'. Defaults to ''. :returns: FuncFormatter.<|endoftext|>
999584aa7b6bdbae56ee598b5081c62ba0ca22ead6409f9509e86fb3a269073a
def getAccountName(player, data): '\n Checks to see if the player exists, or helps create a new one.\n ' if (data.lower() == 'new'): player.setLoginState(MudConst.getNewAccountName) player.writePlain('Choose a character name: ') return if (not data.isalpha()): player.writePlain('\r\nCharacter names may only contain letters!') player.writePlain('\r\nPlease choose your name: ') return else: data = data.capitalize() char_file = (((MudConst.playerDir + os.sep) + data) + '.ply') if os.path.isfile(char_file): MudWorld.world.db.loadPlayer(data, player) player.writePlain('\r\nPassword: ') player.setLoginState(MudConst.getAccountPassword) else: player.writePlain('\r\nCharacter does not exist!') player.writePlain('\r\nEnter your character name: ')
Checks to see if the player exists, or helps create a new one.
MudLogin.py
getAccountName
fhaynes/slithermud
0
python
def getAccountName(player, data): '\n \n ' if (data.lower() == 'new'): player.setLoginState(MudConst.getNewAccountName) player.writePlain('Choose a character name: ') return if (not data.isalpha()): player.writePlain('\r\nCharacter names may only contain letters!') player.writePlain('\r\nPlease choose your name: ') return else: data = data.capitalize() char_file = (((MudConst.playerDir + os.sep) + data) + '.ply') if os.path.isfile(char_file): MudWorld.world.db.loadPlayer(data, player) player.writePlain('\r\nPassword: ') player.setLoginState(MudConst.getAccountPassword) else: player.writePlain('\r\nCharacter does not exist!') player.writePlain('\r\nEnter your character name: ')
def getAccountName(player, data): '\n \n ' if (data.lower() == 'new'): player.setLoginState(MudConst.getNewAccountName) player.writePlain('Choose a character name: ') return if (not data.isalpha()): player.writePlain('\r\nCharacter names may only contain letters!') player.writePlain('\r\nPlease choose your name: ') return else: data = data.capitalize() char_file = (((MudConst.playerDir + os.sep) + data) + '.ply') if os.path.isfile(char_file): MudWorld.world.db.loadPlayer(data, player) player.writePlain('\r\nPassword: ') player.setLoginState(MudConst.getAccountPassword) else: player.writePlain('\r\nCharacter does not exist!') player.writePlain('\r\nEnter your character name: ')<|docstring|>Checks to see if the player exists, or helps create a new one.<|endoftext|>
705658d6371bea671b2d9c096c2b6f3775d41f69d3d8d56ab80253fb615d5aad
def getAccountPassword(player, data): "\n Asks for the user's password and checks it.\n " if (player.getPassword() == data): logger.logging.info((player.getName() + ' entered the game!')) player.writePlain('\r\nPassword accepted!\r\n') player.writeWithPrompt(('Welcome, ' + player.getName())) player.setLoginState(MudConst.logedIn) newAction = MudAction.MudAction('enterworld', player, player.getZoneRef().getId(), player.getRoomRef().getId()) MudWorld.world.actionHandler.doAction(newAction) if ((player.getName() == 'Admin') or (player.getName() == 'Kuros')): MudWorld.world.cmdDb.loadAllCommand(player) else: player.writePlain('\r\nInvalid password. Try again: ') return
Asks for the user's password and checks it.
MudLogin.py
getAccountPassword
fhaynes/slithermud
0
python
def getAccountPassword(player, data): "\n \n " if (player.getPassword() == data): logger.logging.info((player.getName() + ' entered the game!')) player.writePlain('\r\nPassword accepted!\r\n') player.writeWithPrompt(('Welcome, ' + player.getName())) player.setLoginState(MudConst.logedIn) newAction = MudAction.MudAction('enterworld', player, player.getZoneRef().getId(), player.getRoomRef().getId()) MudWorld.world.actionHandler.doAction(newAction) if ((player.getName() == 'Admin') or (player.getName() == 'Kuros')): MudWorld.world.cmdDb.loadAllCommand(player) else: player.writePlain('\r\nInvalid password. Try again: ') return
def getAccountPassword(player, data): "\n \n " if (player.getPassword() == data): logger.logging.info((player.getName() + ' entered the game!')) player.writePlain('\r\nPassword accepted!\r\n') player.writeWithPrompt(('Welcome, ' + player.getName())) player.setLoginState(MudConst.logedIn) newAction = MudAction.MudAction('enterworld', player, player.getZoneRef().getId(), player.getRoomRef().getId()) MudWorld.world.actionHandler.doAction(newAction) if ((player.getName() == 'Admin') or (player.getName() == 'Kuros')): MudWorld.world.cmdDb.loadAllCommand(player) else: player.writePlain('\r\nInvalid password. Try again: ') return<|docstring|>Asks for the user's password and checks it.<|endoftext|>
1e38f3dbf80b7b89b578e9fd149cb2c0bc9742b289d6b2fd9b1bfe47557cc7f3
def getNewAccountName(player, data): '\n Gets new account name if the user is creating a new one.\n ' if (not data.isalpha()): player.writePlain('\r\nAccount names must be letters only!') player.writePlain('\r\nPlease choose a name: ') return data = data.capitalize() char_file = ((MudConst.playerDir + os.sep) + '.ply') if os.path.isfile(char_file): player.writePlain('\r\nThat name is already in use!') player.writePlain('\r\nPlease choose a name: ') return player.writePlain(('Your character name will be: ' + data)) player.writePlain('\r\nIs that ok? (Y/N): ') player.setName(data) player.setLoginState(MudConst.confirmNewAccountName)
Gets new account name if the user is creating a new one.
MudLogin.py
getNewAccountName
fhaynes/slithermud
0
python
def getNewAccountName(player, data): '\n \n ' if (not data.isalpha()): player.writePlain('\r\nAccount names must be letters only!') player.writePlain('\r\nPlease choose a name: ') return data = data.capitalize() char_file = ((MudConst.playerDir + os.sep) + '.ply') if os.path.isfile(char_file): player.writePlain('\r\nThat name is already in use!') player.writePlain('\r\nPlease choose a name: ') return player.writePlain(('Your character name will be: ' + data)) player.writePlain('\r\nIs that ok? (Y/N): ') player.setName(data) player.setLoginState(MudConst.confirmNewAccountName)
def getNewAccountName(player, data): '\n \n ' if (not data.isalpha()): player.writePlain('\r\nAccount names must be letters only!') player.writePlain('\r\nPlease choose a name: ') return data = data.capitalize() char_file = ((MudConst.playerDir + os.sep) + '.ply') if os.path.isfile(char_file): player.writePlain('\r\nThat name is already in use!') player.writePlain('\r\nPlease choose a name: ') return player.writePlain(('Your character name will be: ' + data)) player.writePlain('\r\nIs that ok? (Y/N): ') player.setName(data) player.setLoginState(MudConst.confirmNewAccountName)<|docstring|>Gets new account name if the user is creating a new one.<|endoftext|>
95cac022c1374884bb7c702168ebb890ad0a3bafee27466d62be387143317ab6
def confirmNewAccountName(player, data): '\n Confirms the new account name.\n ' if (data.lower() == 'y'): player.writePlain('\r\nChoose a password: ') player.setLoginState(MudConst.getNewAccountPass) elif (data.lower() == 'n'): player.writePlain('\r\nChoose a character name: ') player.setLoginState(MudConst.getNewAccountName) else: player.writePlain('\r\nPlease type Y or N: ') return
Confirms the new account name.
MudLogin.py
confirmNewAccountName
fhaynes/slithermud
0
python
def confirmNewAccountName(player, data): '\n \n ' if (data.lower() == 'y'): player.writePlain('\r\nChoose a password: ') player.setLoginState(MudConst.getNewAccountPass) elif (data.lower() == 'n'): player.writePlain('\r\nChoose a character name: ') player.setLoginState(MudConst.getNewAccountName) else: player.writePlain('\r\nPlease type Y or N: ') return
def confirmNewAccountName(player, data): '\n \n ' if (data.lower() == 'y'): player.writePlain('\r\nChoose a password: ') player.setLoginState(MudConst.getNewAccountPass) elif (data.lower() == 'n'): player.writePlain('\r\nChoose a character name: ') player.setLoginState(MudConst.getNewAccountName) else: player.writePlain('\r\nPlease type Y or N: ') return<|docstring|>Confirms the new account name.<|endoftext|>
11812b7c22d3aaa755c855bdd229d8e547970b02b885ad56dec26d29a867aaf9
def getNewAccountPass(player, data): '\n Gets a new account password if the player is creating a new accont.\n ' if (not data.isalnum()): player.writePlain('Passwords must contain only letters or numbers!') player.writePlain('Choose a password: ') return player.writePlain(('Your password will be: ' + data)) player.writePlain('\r\nIs that ok? (Y/N): ') player.setPassword(data) player.setLoginState(MudConst.confirmNewAccountPass)
Gets a new account password if the player is creating a new accont.
MudLogin.py
getNewAccountPass
fhaynes/slithermud
0
python
def getNewAccountPass(player, data): '\n \n ' if (not data.isalnum()): player.writePlain('Passwords must contain only letters or numbers!') player.writePlain('Choose a password: ') return player.writePlain(('Your password will be: ' + data)) player.writePlain('\r\nIs that ok? (Y/N): ') player.setPassword(data) player.setLoginState(MudConst.confirmNewAccountPass)
def getNewAccountPass(player, data): '\n \n ' if (not data.isalnum()): player.writePlain('Passwords must contain only letters or numbers!') player.writePlain('Choose a password: ') return player.writePlain(('Your password will be: ' + data)) player.writePlain('\r\nIs that ok? (Y/N): ') player.setPassword(data) player.setLoginState(MudConst.confirmNewAccountPass)<|docstring|>Gets a new account password if the player is creating a new accont.<|endoftext|>
4a0e991109922f7293085a5beeae4c8b1e446b7c1220dd0c9be0ff0de5e09a01
def confirmNewAccountPass(player, data): '\n Confirms the new password for the account.\n ' if (data.lower() == 'y'): logger.logging.info((('New character: ' + player.info['name']) + 'logged in.')) player.writePlain('Character created.\r\n') player.writeWithPrompt(('Welcome, ' + player.getName())) player.setLoginState(MudConst.logedIn) newAction = MudAction.MudAction('enterworld', player, 1, 1) MudWorld.world.actionHandler.doAction(newAction) MudWorld.world.cmdDb.loadPlayerCommands(player) (name, gen) = MudWorld.world.logicDb.getLogic('genericPlayer') player.addLogic(name, gen) MudWorld.world.db.savePlayer(player) elif (data.lower() == 'n'): player.writePlain('\r\nChoose a password: ') player.setLoginState(MudConst.getNewAccountPass) return else: player.writePlain('\r\nInvalid choice. Try again: ') return
Confirms the new password for the account.
MudLogin.py
confirmNewAccountPass
fhaynes/slithermud
0
python
def confirmNewAccountPass(player, data): '\n \n ' if (data.lower() == 'y'): logger.logging.info((('New character: ' + player.info['name']) + 'logged in.')) player.writePlain('Character created.\r\n') player.writeWithPrompt(('Welcome, ' + player.getName())) player.setLoginState(MudConst.logedIn) newAction = MudAction.MudAction('enterworld', player, 1, 1) MudWorld.world.actionHandler.doAction(newAction) MudWorld.world.cmdDb.loadPlayerCommands(player) (name, gen) = MudWorld.world.logicDb.getLogic('genericPlayer') player.addLogic(name, gen) MudWorld.world.db.savePlayer(player) elif (data.lower() == 'n'): player.writePlain('\r\nChoose a password: ') player.setLoginState(MudConst.getNewAccountPass) return else: player.writePlain('\r\nInvalid choice. Try again: ') return
def confirmNewAccountPass(player, data): '\n \n ' if (data.lower() == 'y'): logger.logging.info((('New character: ' + player.info['name']) + 'logged in.')) player.writePlain('Character created.\r\n') player.writeWithPrompt(('Welcome, ' + player.getName())) player.setLoginState(MudConst.logedIn) newAction = MudAction.MudAction('enterworld', player, 1, 1) MudWorld.world.actionHandler.doAction(newAction) MudWorld.world.cmdDb.loadPlayerCommands(player) (name, gen) = MudWorld.world.logicDb.getLogic('genericPlayer') player.addLogic(name, gen) MudWorld.world.db.savePlayer(player) elif (data.lower() == 'n'): player.writePlain('\r\nChoose a password: ') player.setLoginState(MudConst.getNewAccountPass) return else: player.writePlain('\r\nInvalid choice. Try again: ') return<|docstring|>Confirms the new password for the account.<|endoftext|>
18fd171e2cbe944d76433de5417bcafef4f7246cabc3a6ad6066d23baa7e3412
def processLogin(player, data): "\n Decides which function to call based on the user's login state.\n \n Calls the appropiate function.\n " if (player.getLoginState() == MudConst.getAccountName): getAccountName(player, data) elif (player.getLoginState() == MudConst.getAccountPassword): getAccountPassword(player, data) elif (player.getLoginState() == MudConst.getNewAccountName): getNewAccountName(player, data) elif (player.getLoginState() == MudConst.confirmNewAccountName): confirmNewAccountName(player, data) elif (player.getLoginState() == MudConst.getNewAccountPass): getNewAccountPass(player, data) elif (player.getLoginState() == MudConst.confirmNewAccountPass): confirmNewAccountPass(player, data) else: pass
Decides which function to call based on the user's login state. Calls the appropiate function.
MudLogin.py
processLogin
fhaynes/slithermud
0
python
def processLogin(player, data): "\n Decides which function to call based on the user's login state.\n \n Calls the appropiate function.\n " if (player.getLoginState() == MudConst.getAccountName): getAccountName(player, data) elif (player.getLoginState() == MudConst.getAccountPassword): getAccountPassword(player, data) elif (player.getLoginState() == MudConst.getNewAccountName): getNewAccountName(player, data) elif (player.getLoginState() == MudConst.confirmNewAccountName): confirmNewAccountName(player, data) elif (player.getLoginState() == MudConst.getNewAccountPass): getNewAccountPass(player, data) elif (player.getLoginState() == MudConst.confirmNewAccountPass): confirmNewAccountPass(player, data) else: pass
def processLogin(player, data): "\n Decides which function to call based on the user's login state.\n \n Calls the appropiate function.\n " if (player.getLoginState() == MudConst.getAccountName): getAccountName(player, data) elif (player.getLoginState() == MudConst.getAccountPassword): getAccountPassword(player, data) elif (player.getLoginState() == MudConst.getNewAccountName): getNewAccountName(player, data) elif (player.getLoginState() == MudConst.confirmNewAccountName): confirmNewAccountName(player, data) elif (player.getLoginState() == MudConst.getNewAccountPass): getNewAccountPass(player, data) elif (player.getLoginState() == MudConst.confirmNewAccountPass): confirmNewAccountPass(player, data) else: pass<|docstring|>Decides which function to call based on the user's login state. Calls the appropiate function.<|endoftext|>
1de99767c2df429ea1e6779bfe925c2df4a301ceb11ec358f51d0eeb65832bf2
def _set_attributes(self, identifier): '\n Set the attributes of an atom based on the unique "indentifier" provided.\n The attributes are read from the elements.json file.\n ' attrs = ['name', 'symbol', 'atomic_number', 'mass'] atom = element(identifier) for attr in attrs: setattr(self, attr, getattr(atom, attr)) if self.is_dummy: self.set_atomic_number(0.0)
Set the attributes of an atom based on the unique "indentifier" provided. The attributes are read from the elements.json file.
chemtools/molecule.py
_set_attributes
lmmentel/chemtools
7
python
def _set_attributes(self, identifier): '\n Set the attributes of an atom based on the unique "indentifier" provided.\n The attributes are read from the elements.json file.\n ' attrs = ['name', 'symbol', 'atomic_number', 'mass'] atom = element(identifier) for attr in attrs: setattr(self, attr, getattr(atom, attr)) if self.is_dummy: self.set_atomic_number(0.0)
def _set_attributes(self, identifier): '\n Set the attributes of an atom based on the unique "indentifier" provided.\n The attributes are read from the elements.json file.\n ' attrs = ['name', 'symbol', 'atomic_number', 'mass'] atom = element(identifier) for attr in attrs: setattr(self, attr, getattr(atom, attr)) if self.is_dummy: self.set_atomic_number(0.0)<|docstring|>Set the attributes of an atom based on the unique "indentifier" provided. The attributes are read from the elements.json file.<|endoftext|>
fb4ed87b3e7e0df15eafbb0efb26d406824dd2dbe6ceb4de6821a580ea679848
def move(self, x=0.0, y=0.0, z=0.0): 'Move atom to a set of new coordinates given in xyz' self.xyz = np.asarray([x, y, z], dtype=self._dtxyz)
Move atom to a set of new coordinates given in xyz
chemtools/molecule.py
move
lmmentel/chemtools
7
python
def move(self, x=0.0, y=0.0, z=0.0): self.xyz = np.asarray([x, y, z], dtype=self._dtxyz)
def move(self, x=0.0, y=0.0, z=0.0): self.xyz = np.asarray([x, y, z], dtype=self._dtxyz)<|docstring|>Move atom to a set of new coordinates given in xyz<|endoftext|>
69b44f809ca11914ed8f05d2e200c91abddeb10ce042cc9c83d6cb3056593108
def unique(self): 'Get a list of unique atom specified by unique keyword' return [self.atoms[i] for i in self.unique_labels]
Get a list of unique atom specified by unique keyword
chemtools/molecule.py
unique
lmmentel/chemtools
7
python
def unique(self): return [self.atoms[i] for i in self.unique_labels]
def unique(self): return [self.atoms[i] for i in self.unique_labels]<|docstring|>Get a list of unique atom specified by unique keyword<|endoftext|>
196a217d078535b7a00883eafb164461698164555acd27bad595195df590683c
def nele(self): 'Get the total number of electrons in a molecule.' nelectrons = 0 for atom in self.atoms: if (atom.atomic_number > 0): nelectrons += atom.atomic_number return (nelectrons - self.charge)
Get the total number of electrons in a molecule.
chemtools/molecule.py
nele
lmmentel/chemtools
7
python
def nele(self): nelectrons = 0 for atom in self.atoms: if (atom.atomic_number > 0): nelectrons += atom.atomic_number return (nelectrons - self.charge)
def nele(self): nelectrons = 0 for atom in self.atoms: if (atom.atomic_number > 0): nelectrons += atom.atomic_number return (nelectrons - self.charge)<|docstring|>Get the total number of electrons in a molecule.<|endoftext|>
9806cbcd985161c0bffa411f2c4d5681434243a33e0c6be2a35d18cf8bd41ad8
def get_distance(self, atom1, atom2): 'Calcualte the distance between two atoms.' dist = 0.0 for i in range(3): dist += ((self.atoms[atom1].xyz[i] - self.atoms[atom2].xyz[i]) ** 2) return sqrt(dist)
Calcualte the distance between two atoms.
chemtools/molecule.py
get_distance
lmmentel/chemtools
7
python
def get_distance(self, atom1, atom2): dist = 0.0 for i in range(3): dist += ((self.atoms[atom1].xyz[i] - self.atoms[atom2].xyz[i]) ** 2) return sqrt(dist)
def get_distance(self, atom1, atom2): dist = 0.0 for i in range(3): dist += ((self.atoms[atom1].xyz[i] - self.atoms[atom2].xyz[i]) ** 2) return sqrt(dist)<|docstring|>Calcualte the distance between two atoms.<|endoftext|>
7b1703404549c23e3497836bb08ae83e32fcbbc2e9cf14eb899d212ab70fc723
def __str__(self): 'Print formatted molecule data.' out = 'Name: {n:<10s} Charge: {c:<10d} Multiplicty: {m:<10d} Electrons: {e:<10d}\n'.format(n=self.name, c=self.charge, m=self.multiplicity, e=self.nele()) out += 'Atoms:\n' out += '{0:<10s} {1:^14s}\t{2:^15s}{3:^15s}{4:^15s}\n'.format('Element', 'Nuclear Charge', 'x', 'y', 'z') for atom in self.atoms: out += str(atom) return out
Print formatted molecule data.
chemtools/molecule.py
__str__
lmmentel/chemtools
7
python
def __str__(self): out = 'Name: {n:<10s} Charge: {c:<10d} Multiplicty: {m:<10d} Electrons: {e:<10d}\n'.format(n=self.name, c=self.charge, m=self.multiplicity, e=self.nele()) out += 'Atoms:\n' out += '{0:<10s} {1:^14s}\t{2:^15s}{3:^15s}{4:^15s}\n'.format('Element', 'Nuclear Charge', 'x', 'y', 'z') for atom in self.atoms: out += str(atom) return out
def __str__(self): out = 'Name: {n:<10s} Charge: {c:<10d} Multiplicty: {m:<10d} Electrons: {e:<10d}\n'.format(n=self.name, c=self.charge, m=self.multiplicity, e=self.nele()) out += 'Atoms:\n' out += '{0:<10s} {1:^14s}\t{2:^15s}{3:^15s}{4:^15s}\n'.format('Element', 'Nuclear Charge', 'x', 'y', 'z') for atom in self.atoms: out += str(atom) return out<|docstring|>Print formatted molecule data.<|endoftext|>
7feba9d8d77ed862dc681556ab7cd080cad51dcfbe9ae36721e1ef4b96c04923
def get_find_groups(self, places_filters, keyword_filters): '\n Get groups from the meetup api based on the given filters and filter them by keywords\n :param places_filters: List, dicts of country codes and cities to be filtered\n :param keyword_filters: List, words of approved groups\n :return List: Groups that were found in the meetup api\n ' found_places = [] for place in places_filters: find_group_filter = {'country': place['country'], 'location': place['location'], 'text': 'python', 'distance': 100} python_groups = self.__client__.GetFindGroups(find_group_filter) for python_group in python_groups: for keyword_filter in keyword_filters: if (keyword_filter in python_group.name): found_places.append({'country_code': python_group.country, 'country_name': python_group.localized_country_name, 'city': python_group.city, 'members': python_group.members, 'name': python_group.name}) return found_places
Get groups from the meetup api based on the given filters and filter them by keywords :param places_filters: List, dicts of country codes and cities to be filtered :param keyword_filters: List, words of approved groups :return List: Groups that were found in the meetup api
app/utils/meetup_utils.py
get_find_groups
ColombiaPython/meetup-api-python-groups
1
python
def get_find_groups(self, places_filters, keyword_filters): '\n Get groups from the meetup api based on the given filters and filter them by keywords\n :param places_filters: List, dicts of country codes and cities to be filtered\n :param keyword_filters: List, words of approved groups\n :return List: Groups that were found in the meetup api\n ' found_places = [] for place in places_filters: find_group_filter = {'country': place['country'], 'location': place['location'], 'text': 'python', 'distance': 100} python_groups = self.__client__.GetFindGroups(find_group_filter) for python_group in python_groups: for keyword_filter in keyword_filters: if (keyword_filter in python_group.name): found_places.append({'country_code': python_group.country, 'country_name': python_group.localized_country_name, 'city': python_group.city, 'members': python_group.members, 'name': python_group.name}) return found_places
def get_find_groups(self, places_filters, keyword_filters): '\n Get groups from the meetup api based on the given filters and filter them by keywords\n :param places_filters: List, dicts of country codes and cities to be filtered\n :param keyword_filters: List, words of approved groups\n :return List: Groups that were found in the meetup api\n ' found_places = [] for place in places_filters: find_group_filter = {'country': place['country'], 'location': place['location'], 'text': 'python', 'distance': 100} python_groups = self.__client__.GetFindGroups(find_group_filter) for python_group in python_groups: for keyword_filter in keyword_filters: if (keyword_filter in python_group.name): found_places.append({'country_code': python_group.country, 'country_name': python_group.localized_country_name, 'city': python_group.city, 'members': python_group.members, 'name': python_group.name}) return found_places<|docstring|>Get groups from the meetup api based on the given filters and filter them by keywords :param places_filters: List, dicts of country codes and cities to be filtered :param keyword_filters: List, words of approved groups :return List: Groups that were found in the meetup api<|endoftext|>