body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
def form_subsystems_from_single_includers(subsystems):
    """
    For each item, if that item does not include anything, check if it is only included by one file.
    If so, merge the item into the subsystem that contains its sole includer.

    :param subsystems: A list of lists; each inner list is one subsystem of file/class names.
    :return: The (mutated) list of subsystems.
    """
    # Combined dependency edges; each tuple is (includer, included).
    relationships = includes + inheritance
    for group in subsystems:
        # Only singleton subsystems are candidates for merging.
        # NOTE: the original compared with `is not 1` / `is` / `is 1`; identity
        # comparison on ints and strings is an implementation detail, so use
        # value equality instead.
        if len(group) != 1:
            continue
        item = group[0]
        # Items that include something anchor their own subsystem; skip them.
        if any(tup[0] == item for tup in relationships):
            continue
        all_files_that_include_this_one = [r[0] for r in relationships if r[1] == item]
        if len(all_files_that_include_this_one) == 1:
            # Append the item to every subsystem containing its sole includer.
            for system in subsystems:
                if all_files_that_include_this_one[0] in system:
                    system.append(item)
    return subsystems
:param subsystems:
:return: | Tools/DiagramGenerator/dotGenerator.py | form_subsystems_from_single_includers | shastrihm/BrainGrid | 0 | python | def form_subsystems_from_single_includers(subsystems):
'\n For each item, if that item does not include anything, check if it is only included by one file.\n :param subsystems:\n :return:\n '
relationships = (includes + inheritance)
for sys in subsystems:
if (len(sys) is not 1):
continue
else:
item = sys[0]
item_includes_anything = False
for tup in relationships:
if (tup[0] is item):
item_includes_anything = True
if item_includes_anything:
continue
else:
all_files_that_include_this_one = [r[0] for r in relationships if (r[1] == item)]
if (len(all_files_that_include_this_one) is 1):
for system in subsystems:
if (all_files_that_include_this_one[0] in system):
system.append(item)
return subsystems | def form_subsystems_from_single_includers(subsystems):
'\n For each item, if that item does not include anything, check if it is only included by one file.\n :param subsystems:\n :return:\n '
relationships = (includes + inheritance)
for sys in subsystems:
if (len(sys) is not 1):
continue
else:
item = sys[0]
item_includes_anything = False
for tup in relationships:
if (tup[0] is item):
item_includes_anything = True
if item_includes_anything:
continue
else:
all_files_that_include_this_one = [r[0] for r in relationships if (r[1] == item)]
if (len(all_files_that_include_this_one) is 1):
for system in subsystems:
if (all_files_that_include_this_one[0] in system):
system.append(item)
return subsystems<|docstring|>For each item, if that item does not include anything, check if it is only included by one file.
:param subsystems:
:return:<|endoftext|> |
def get_subgraphs():
    """
    Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).
    Classes sharing the same 'color' form one subgraph; each uncolored class
    becomes its own singleton subgraph.

    :return: A list of lists of dictionaries.
    """
    # De-duplicate colors while preserving first-seen order, so each colored
    # subgraph is built exactly once (the original scanned a duplicate-laden
    # list and filtered repeats with an O(n^2) `not in` check).
    colors = dict.fromkeys(c.get('color') for c in classes if c.get('color') is not None)
    subgraphs = [[cl for cl in classes if cl.get('color') == color] for color in colors]
    # Every uncolored class is a subgraph of its own.
    for c in classes:
        if c.get('color') is None:
            subgraphs.append([c])
    return subgraphs
:return: A list of lists of dictionaries. | Tools/DiagramGenerator/dotGenerator.py | get_subgraphs | shastrihm/BrainGrid | 0 | python | def get_subgraphs():
'\n Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).\n :return: A list of lists of dictionaries.\n '
subgraph_list = [c.get('color') for c in classes if (c.get('color') is not None)]
subgraphs = []
for c in subgraph_list:
sub = [cl for cl in classes if ((cl.get('color') == c) and cl)]
if (sub not in subgraphs):
subgraphs.append(sub)
for c in classes:
if (c.get('color') is None):
sub = [c]
subgraphs.append(sub)
return subgraphs | def get_subgraphs():
'\n Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).\n :return: A list of lists of dictionaries.\n '
subgraph_list = [c.get('color') for c in classes if (c.get('color') is not None)]
subgraphs = []
for c in subgraph_list:
sub = [cl for cl in classes if ((cl.get('color') == c) and cl)]
if (sub not in subgraphs):
subgraphs.append(sub)
for c in classes:
if (c.get('color') is None):
sub = [c]
subgraphs.append(sub)
return subgraphs<|docstring|>Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).
:return: A list of lists of dictionaries.<|endoftext|> |
def get_sub_name_new_style(sub):
    """
    Gets the name of the passed in subgraph. The subgraph that is passed in is a list of names.

    :param sub: A list of file/class names making up one subgraph.
    :return: The name of the passed in subgraph, or 'NAME_ERROR' if no subsystem matches.
    """
    # Compare as sets so member order is irrelevant.
    target = set(sub)
    for name, system in global_dict_subsystems.items():
        if set(system) == target:
            return name
    return 'NAME_ERROR'
:param sub:
:return: The name of the passed in subgraph. | Tools/DiagramGenerator/dotGenerator.py | get_sub_name_new_style | shastrihm/BrainGrid | 0 | python | def get_sub_name_new_style(sub):
'\n Gets the name of the passed in subgraph. The subgraph that is passed in is a list of names.\n :param sub:\n :return: The name of the passed in subgraph.\n '
for name in iter(global_dict_subsystems.keys()):
system = global_dict_subsystems[name]
if (set(system) == set(sub)):
return name
return 'NAME_ERROR' | def get_sub_name_new_style(sub):
'\n Gets the name of the passed in subgraph. The subgraph that is passed in is a list of names.\n :param sub:\n :return: The name of the passed in subgraph.\n '
for name in iter(global_dict_subsystems.keys()):
system = global_dict_subsystems[name]
if (set(system) == set(sub)):
return name
return 'NAME_ERROR'<|docstring|>Gets the name of the passed in subgraph. The subgraph that is passed in is a list of names.
:param sub:
:return: The name of the passed in subgraph.<|endoftext|> |
def get_top_files():
    """
    Returns a list of lists. Each list is all the top files from a subsystem, where a top file
    is defined as one which is included by nothing else in its own subsystem.

    :return: A list of lists of top-level file names, one list per subsystem.
    """
    top_levels = []
    subsystems = list(set(d.get('color') for d in classes if d.get('color') is not None))
    relationships = includes + inheritance
    # True when both endpoints of a dependency edge lie inside the subsystem.
    in_subsystem = lambda tup, sub: tup[0] in sub and tup[1] in sub
    for s in subsystems:
        subsystem = [c.get('name') for c in classes if c.get('color') == s]
        subsystem_relationships = [t for t in relationships if in_subsystem(t, subsystem)]
        # A top file never appears as the target (index 1) of an in-subsystem
        # edge. NOTE: the original tested the count with `is 0`, which relies
        # on int identity; a set membership test is both correct and O(1).
        included = set(t[1] for t in subsystem_relationships)
        top_files = [c for c in subsystem if c not in included]
        top_levels.append(top_files)
    return top_levels
is included by nothing else in its own subsystem.
:return: | Tools/DiagramGenerator/dotGenerator.py | get_top_files | shastrihm/BrainGrid | 0 | python | def get_top_files():
'\n Returns a list of lists. Each list is all the top files from a subsystem, where a top file is defined as one which\n is included by nothing else in its own subsystem.\n :return:\n '
top_levels = []
subsystems = list(set([d.get('color') for d in classes if (d.get('color') is not None)]))
relationships = (includes + inheritance)
f = (lambda tup, sub: ((tup[0] in sub) and (tup[1] in sub)))
for s in subsystems:
subsystem = [c.get('name') for c in classes if (c.get('color') == s)]
subsystem_relationships = [t for t in relationships if f(t, subsystem)]
top_lambda = (lambda c, sub_rel: (len([t for t in sub_rel if (t[1] == c)]) is 0))
top_files = [c for c in subsystem if top_lambda(c, subsystem_relationships)]
top_levels.append(top_files)
return top_levels | def get_top_files():
'\n Returns a list of lists. Each list is all the top files from a subsystem, where a top file is defined as one which\n is included by nothing else in its own subsystem.\n :return:\n '
top_levels = []
subsystems = list(set([d.get('color') for d in classes if (d.get('color') is not None)]))
relationships = (includes + inheritance)
f = (lambda tup, sub: ((tup[0] in sub) and (tup[1] in sub)))
for s in subsystems:
subsystem = [c.get('name') for c in classes if (c.get('color') == s)]
subsystem_relationships = [t for t in relationships if f(t, subsystem)]
top_lambda = (lambda c, sub_rel: (len([t for t in sub_rel if (t[1] == c)]) is 0))
top_files = [c for c in subsystem if top_lambda(c, subsystem_relationships)]
top_levels.append(top_files)
return top_levels<|docstring|>Returns a list of lists. Each list is all the top files from a subsystem, where a top file is defined as one which
is included by nothing else in its own subsystem.
:return:<|endoftext|> |
def hash_all_files():
    """
    Walks through all the directories from working one down and hashes those files that exist.

    :return: Nothing
    """
    for root, dirnames, filenames in os.walk('.'):
        directory = os.path.basename(root)
        # Skip directories the project has chosen to ignore.
        if directory in ignores:
            continue
        for f_name in filenames:
            # Only record files whose extension is one of the allowed types.
            extension = '.' + f_name.split('.')[-1]
            if extension in allowable_file_types:
                __file_hash[f_name] = os.path.join(root, f_name)
:return: Nothing | Tools/DiagramGenerator/dotGenerator.py | hash_all_files | shastrihm/BrainGrid | 0 | python | def hash_all_files():
'\n Walks through all the directories from working one down and hashes those files that exist.\n :return: Nothing\n '
dir_tree = os.walk('.')
for (root, dirnames, filenames) in dir_tree:
d = os.path.basename(root)
if (d in ignores):
continue
else:
files_to_hash = [f_name for f_name in filenames if (('.' + f_name.split('.')[(- 1)]) in allowable_file_types)]
for f_name in files_to_hash:
__file_hash[f_name] = os.path.join(root, f_name) | def hash_all_files():
'\n Walks through all the directories from working one down and hashes those files that exist.\n :return: Nothing\n '
dir_tree = os.walk('.')
for (root, dirnames, filenames) in dir_tree:
d = os.path.basename(root)
if (d in ignores):
continue
else:
files_to_hash = [f_name for f_name in filenames if (('.' + f_name.split('.')[(- 1)]) in allowable_file_types)]
for f_name in files_to_hash:
__file_hash[f_name] = os.path.join(root, f_name)<|docstring|>Walks through all the directories from working one down and hashes those files that exist.
:return: Nothing<|endoftext|> |
def is_inheritance(derived, base):
    """
    This function determines if the argument "derived" is inherited from the argument "base".

    :param derived: Name (without extension) of the potential derived class.
    :param base: Name (without extension) of the potential base class.
    :return: True if derived's header file declares inheritance from base, else False.
    """
    try:
        derived_file = find_file(derived + '.h', 'rb')
    except IOError:
        # No header for the derived class means we cannot establish inheritance.
        return False
    try:
        # The file is opened in binary mode, so decode each line to text.
        # The original used str(line), which under Python 3 yields the
        # "b'...'" repr and corrupts the text the regex scans.
        contents = ''.join(line.decode('utf-8', errors='ignore') for line in derived_file)
    finally:
        derived_file.close()
    regex = '(class)(\\s)+(' + derived + ')(\\s)+(:)(.)*(' + base + ')(.)*'
    # DOTALL lets the base-class name appear on a later line of the
    # declaration, matching the (escape-mangled) behavior of the original.
    pattern = re.compile(regex, re.DOTALL)
    return pattern.search(contents) is not None
'\n \n '
try:
derived_file = find_file((derived + '.h'), 'rb')
except IOError as ex:
return False
lines = [str(line) for line in derived_file]
derived_file.close()
contents =
for line in lines:
contents += line
regex = (((('(class)(\\s)+(' + derived) + ')(\\s)+(:)(.)*(') + base) + ')(.)*')
pattern = re.compile(regex)
match_obj = pattern.search(contents)
if match_obj:
return True
else:
return False | def is_inheritance(derived, base):
'\n \n '
try:
derived_file = find_file((derived + '.h'), 'rb')
except IOError as ex:
return False
lines = [str(line) for line in derived_file]
derived_file.close()
contents =
for line in lines:
contents += line
regex = (((('(class)(\\s)+(' + derived) + ')(\\s)+(:)(.)*(') + base) + ')(.)*')
pattern = re.compile(regex)
match_obj = pattern.search(contents)
if match_obj:
return True
else:
return False<|docstring|>This function determines if the argument "derived" is inherited from the argument "base".<|endoftext|> |
def list_includes_any_items_from_other_list(list_a, list_b):
    """
    This method doesn't do at all what it sounds like.
    It checks list_a and returns True if any of the items in it include any of the items in list b.
    That is, if any item in list_a inherits from or includes any item in list_b, this method returns True.
    False otherwise.

    :param list_a: Candidate includer names.
    :param list_b: Candidate included names.
    :return: True if any (a, b) pair is a known include/inheritance edge.
    """
    # A set gives O(1) membership tests instead of rescanning a list for
    # every (a, b) pair; tuples of names are hashable so behavior is unchanged.
    all_includes = set(includes + inheritance)
    return any((a, b) in all_includes for a in list_a for b in list_b)
It checks list_a and returns True if any of the items in it include any of the items in list b.
That is, if any item in list_a inherits from or includes any item in list_b, this method returns True.
False otherwise.
:param list_a:
:param list_b:
:return: | Tools/DiagramGenerator/dotGenerator.py | list_includes_any_items_from_other_list | shastrihm/BrainGrid | 0 | python | def list_includes_any_items_from_other_list(list_a, list_b):
"\n This method doesn't do at all what it sounds like.\n It checks list_a and returns True if any of the items in it include any of the items in list b.\n That is, if any item in list_a inherits from or includes any item in list_b, this method returns True.\n False otherwise.\n :param list_a:\n :param list_b:\n :return:\n "
all_includes = (includes + inheritance)
for a in list_a:
for b in list_b:
tup = (a, b)
if (tup in all_includes):
return True
return False | def list_includes_any_items_from_other_list(list_a, list_b):
"\n This method doesn't do at all what it sounds like.\n It checks list_a and returns True if any of the items in it include any of the items in list b.\n That is, if any item in list_a inherits from or includes any item in list_b, this method returns True.\n False otherwise.\n :param list_a:\n :param list_b:\n :return:\n "
all_includes = (includes + inheritance)
for a in list_a:
for b in list_b:
tup = (a, b)
if (tup in all_includes):
return True
return False<|docstring|>This method doesn't do at all what it sounds like.
It checks list_a and returns True if any of the items in it include any of the items in list b.
That is, if any item in list_a inherits from or includes any item in list_b, this method returns True.
False otherwise.
:param list_a:
:param list_b:
:return:<|endoftext|> |
def map_directories(file_paths):
    """
    Maps the subsystems globally using the new method of system detection (directory structures).

    :param file_paths: The file paths of each file that corresponds to this class/module.
    :return:
    """
    # Tally how many of this module's files live in each directory.
    votes = {}
    for path in file_paths:
        directory = os.sep.join(path.strip().split(os.sep)[:-1])
        votes[directory] = votes.get(directory, 0) + 1
    # Pick the directory with the most files; the >= comparison means ties
    # go to the last key visited, matching the original behavior.
    item_path = ''
    total_votes = 0
    for directory, count in votes.items():
        if count >= total_votes:
            total_votes = count
            item_path = directory
    folder = item_path.split(os.sep)[-1]
    file_name = file_paths[0].split(os.sep)[-1].split('.')[0]
    global_dict_subsystems.setdefault(folder, []).append(file_name)
:param file_paths: The file paths of each file that corresponds to this class/module.
:return: | Tools/DiagramGenerator/dotGenerator.py | map_directories | shastrihm/BrainGrid | 0 | python | def map_directories(file_paths):
'\n Maps the subsystems globally using the new method of system detection (directory structures).\n :param file_paths: The file paths of each file that corresponds to this class/module.\n :return:\n '
votes = {}
for path in file_paths:
path_minus_name = os.sep.join(path.strip().split(os.sep)[0:(- 1)])
votes[path_minus_name] = ((votes[path_minus_name] + 1) if (path_minus_name in votes) else 1)
item_path =
total_votes = 0
for key in iter(votes.keys()):
if (votes[key] >= total_votes):
total_votes = votes[key]
item_path = key
folder = item_path.split(os.sep)[(- 1)]
file_name = file_paths[0].split(os.sep)[(- 1)].split('.')[0]
if (folder in global_dict_subsystems):
global_dict_subsystems[folder].append(file_name)
else:
global_dict_subsystems[folder] = [file_name] | def map_directories(file_paths):
'\n Maps the subsystems globally using the new method of system detection (directory structures).\n :param file_paths: The file paths of each file that corresponds to this class/module.\n :return:\n '
votes = {}
for path in file_paths:
path_minus_name = os.sep.join(path.strip().split(os.sep)[0:(- 1)])
votes[path_minus_name] = ((votes[path_minus_name] + 1) if (path_minus_name in votes) else 1)
item_path =
total_votes = 0
for key in iter(votes.keys()):
if (votes[key] >= total_votes):
total_votes = votes[key]
item_path = key
folder = item_path.split(os.sep)[(- 1)]
file_name = file_paths[0].split(os.sep)[(- 1)].split('.')[0]
if (folder in global_dict_subsystems):
global_dict_subsystems[folder].append(file_name)
else:
global_dict_subsystems[folder] = [file_name]<|docstring|>Maps the subsystems globally using the new method of system detection (directory structures).
:param file_paths: The file paths of each file that corresponds to this class/module.
:return:<|endoftext|> |
def map_inheritance_and_composition(list_of_include_groups, use_old_discovery_mode):
    """
    This function maps the relationships between the files which are related and fills the global
    "includes" and "inheritance" lists with tuples of the form: (includer, included).
    This function also populates the "classes" list.

    :param list_of_include_groups: A list of lists, each of the form
        [file_name_A, file_name_B, file_name_C, etc.] where file_name_B and file_name_C, etc.
        are all included BY file A.
    :param use_old_discovery_mode: Whether or not to use the old way of discovering subsystems
        (heuristics). The new way uses the directory structure to determine subsystems.
    """
    print('Mapping relationships and identifying subsystems...')
    for include_group in list_of_include_groups:
        # Groups with no included files contribute nothing.
        if len(include_group) <= 1:
            continue
        parent_name = include_group[0]
        if {'name': parent_name} not in classes:
            classes.append({'name': parent_name})
        print('Mapping relationships for ' + parent_name)
        for item in include_group[1:]:
            if {'name': item} not in classes:
                classes.append({'name': item})
            relationship = (parent_name, item)
            already_inherits = relationship in inheritance
            if is_inheritance(parent_name, item) and not already_inherits:
                print(parent_name + ' INHERITS from ' + item)
                inheritance.append(relationship)
            elif relationship not in includes and not already_inherits:
                print(parent_name + ' DEPENDS on ' + item)
                includes.append(relationship)
    map_subsystems(use_old_discovery_mode)
"includes" and "inheritance" lists with tuples of the form: (includer, included).
This function also populates the "classes" list.
:param list_of_include_groups: A list of lists, each of the form [file_name_A, file_name_B, file_name_C, etc.] where
file_name_B and file_name_C, etc. are all included BY file A.
:param use_old_discovery_mode: Whether or not to use the old way of discovering subsystems (heuristics). The new
way uses the directory structure to determine subsystems. | Tools/DiagramGenerator/dotGenerator.py | map_inheritance_and_composition | shastrihm/BrainGrid | 0 | python | def map_inheritance_and_composition(list_of_include_groups, use_old_discovery_mode):
'\n This function maps the relationships between the files which are related and fills the global\n "includes" and "inheritance" lists with tuples of the form: (includer, included).\n This function also populates the "classes" list.\n :param list_of_include_groups: A list of lists, each of the form [file_name_A, file_name_B, file_name_C, etc.] where\n file_name_B and file_name_C, etc. are all included BY file A.\n :param use_old_discovery_mode: Whether or not to use the old way of discovering subsystems (heuristics). The new\n way uses the directory structure to determine subsystems.\n '
print('Mapping relationships and identifying subsystems...')
for include_group in list_of_include_groups:
if (len(include_group) > 1):
parent_name = include_group[0]
if ({'name': parent_name} not in classes):
classes.append({'name': parent_name})
rest_of_layer = include_group[1:]
print(('Mapping relationships for ' + parent_name))
for item in rest_of_layer:
if ({'name': item} not in classes):
classes.append({'name': item})
relationship = (parent_name, item)
if (is_inheritance(parent_name, item) and (not (relationship in inheritance))):
print(((parent_name + ' INHERITS from ') + item))
inheritance.append(relationship)
elif ((relationship not in includes) and (not (relationship in inheritance))):
print(((parent_name + ' DEPENDS on ') + item))
includes.append(relationship)
map_subsystems(use_old_discovery_mode) | def map_inheritance_and_composition(list_of_include_groups, use_old_discovery_mode):
'\n This function maps the relationships between the files which are related and fills the global\n "includes" and "inheritance" lists with tuples of the form: (includer, included).\n This function also populates the "classes" list.\n :param list_of_include_groups: A list of lists, each of the form [file_name_A, file_name_B, file_name_C, etc.] where\n file_name_B and file_name_C, etc. are all included BY file A.\n :param use_old_discovery_mode: Whether or not to use the old way of discovering subsystems (heuristics). The new\n way uses the directory structure to determine subsystems.\n '
print('Mapping relationships and identifying subsystems...')
for include_group in list_of_include_groups:
if (len(include_group) > 1):
parent_name = include_group[0]
if ({'name': parent_name} not in classes):
classes.append({'name': parent_name})
rest_of_layer = include_group[1:]
print(('Mapping relationships for ' + parent_name))
for item in rest_of_layer:
if ({'name': item} not in classes):
classes.append({'name': item})
relationship = (parent_name, item)
if (is_inheritance(parent_name, item) and (not (relationship in inheritance))):
print(((parent_name + ' INHERITS from ') + item))
inheritance.append(relationship)
elif ((relationship not in includes) and (not (relationship in inheritance))):
print(((parent_name + ' DEPENDS on ') + item))
includes.append(relationship)
map_subsystems(use_old_discovery_mode)<|docstring|>This function maps the relationships between the files which are related and fills the global
"includes" and "inheritance" lists with tuples of the form: (includer, included).
This function also populates the "classes" list.
:param list_of_include_groups: A list of lists, each of the form [file_name_A, file_name_B, file_name_C, etc.] where
file_name_B and file_name_C, etc. are all included BY file A.
:param use_old_discovery_mode: Whether or not to use the old way of discovering subsystems (heuristics). The new
way uses the directory structure to determine subsystems.<|endoftext|> |
def map_subsystems(use_old_discovery_mode=False):
    """
    Walks through the three global lists (inheritance, includes, and classes) and determines what
    subsystem each item belongs to. Adds that information to the "classes" list.

    :param use_old_discovery_mode: Whether or not the subgraphs should be made by the old way of
        discovering them.
    """
    if not use_old_discovery_mode:
        # New mode: subsystems were already discovered from the directory layout.
        color_subsystems(global_dict_subsystems)
        create_subgraphs(global_dict_subsystems)
        return
    # Old mode: start with one singleton subsystem per class, then merge
    # repeatedly using the heuristic passes.
    subsystems = [[item.get('name')] for item in classes]
    subsystems = form_subsystems_from_inheritance(subsystems)
    subsystems = form_subsystems_from_single_includers(subsystems)
    for _ in range(10):
        subsystems = form_subsystems_from_inclusions_in_other_subsystems(subsystems)
    color_subsystems(subsystems, use_old_discovery_mode)
    create_subgraphs(subsystems, use_old_discovery_mode)
item belongs to. Adds that information to the "classes" list.
:param use_old_discovery_mode: Whether or not the subgraphs should be made by the old way of discovering them. | Tools/DiagramGenerator/dotGenerator.py | map_subsystems | shastrihm/BrainGrid | 0 | python | def map_subsystems(use_old_discovery_mode=False):
'\n Walks through the three global lists (inheritance, includes, and classes) and determines what subsystem each\n item belongs to. Adds that information to the "classes" list.\n :param use_old_discovery_mode: Whether or not the subgraphs should be made by the old way of discovering them.\n '
if use_old_discovery_mode:
subsystems = [[item.get('name')] for item in classes]
subsystems = form_subsystems_from_inheritance(subsystems)
subsystems = form_subsystems_from_single_includers(subsystems)
for i in range(0, 10):
subsystems = form_subsystems_from_inclusions_in_other_subsystems(subsystems)
color_subsystems(subsystems, use_old_discovery_mode)
create_subgraphs(subsystems, use_old_discovery_mode)
else:
color_subsystems(global_dict_subsystems)
create_subgraphs(global_dict_subsystems) | def map_subsystems(use_old_discovery_mode=False):
'\n Walks through the three global lists (inheritance, includes, and classes) and determines what subsystem each\n item belongs to. Adds that information to the "classes" list.\n :param use_old_discovery_mode: Whether or not the subgraphs should be made by the old way of discovering them.\n '
if use_old_discovery_mode:
subsystems = [[item.get('name')] for item in classes]
subsystems = form_subsystems_from_inheritance(subsystems)
subsystems = form_subsystems_from_single_includers(subsystems)
for i in range(0, 10):
subsystems = form_subsystems_from_inclusions_in_other_subsystems(subsystems)
color_subsystems(subsystems, use_old_discovery_mode)
create_subgraphs(subsystems, use_old_discovery_mode)
else:
color_subsystems(global_dict_subsystems)
create_subgraphs(global_dict_subsystems)<|docstring|>Walks through the three global lists (inheritance, includes, and classes) and determines what subsystem each
item belongs to. Adds that information to the "classes" list.
:param use_old_discovery_mode: Whether or not the subgraphs should be made by the old way of discovering them.<|endoftext|> |
def remove_extensions():
    """
    Removes all the file extensions from allowables which don't actually exist for this project.

    :return: Nothing
    """
    # Mutate the global list in place so other modules see the trimmed set.
    for extension in extension_ignores:
        if extension in allowable_file_types:
            allowable_file_types.remove(extension)
:return: Nothing | Tools/DiagramGenerator/dotGenerator.py | remove_extensions | shastrihm/BrainGrid | 0 | python | def remove_extensions():
"\n Removes all the file extensions from allowables which don't actually exist for this project.\n :return: Nothing\n "
for i in extension_ignores:
if (i in allowable_file_types):
allowable_file_types.remove(i) | def remove_extensions():
"\n Removes all the file extensions from allowables which don't actually exist for this project.\n :return: Nothing\n "
for i in extension_ignores:
if (i in allowable_file_types):
allowable_file_types.remove(i)<|docstring|>Removes all the file extensions from allowables which don't actually exist for this project.
:return: Nothing<|endoftext|> |
def trim_directory():
    """
    Searches from the working directory down recursively, adding any directories it finds which
    don't have any files with .cpp, .h, .cu, etc extensions in them to the ignore list.
    Also, if it never finds any .cc or .cu (or .c, .cpp, etc) files, it adds those extensions
    to the ignore list.

    :return: Nothing
    """
    # Track whether each allowable extension was seen anywhere in the tree.
    seen = {ext: False for ext in allowable_file_types}
    for root, dirnames, filenames in os.walk('.'):
        directory = os.path.basename(root)
        if directory in ignores:
            continue
        has_source = False
        for ext in allowable_file_types:
            if fnmatch.filter(filenames, '*' + ext):
                seen[ext] = True
                has_source = True
        # Directories with no source files of interest get ignored.
        if not has_source:
            ignores.append(directory)
    # Extensions never encountered anywhere are ignored project-wide.
    extension_ignores.extend(ext for ext in seen if not seen[ext])
files with .cpp, .h, .cu, etc extensions in them to the ignore list.
Also, if it never finds any .cc or .cu (or .c, .cpp, etc) files, it adds those extensions to the ignore list.
:return: Nothing | Tools/DiagramGenerator/dotGenerator.py | trim_directory | shastrihm/BrainGrid | 0 | python | def trim_directory():
"\n Searches from the working directory down recursively, adding any directories it finds which don't have any\n files with .cpp, .h, .cu, etc extensions in them to the ignore list.\n Also, if it never finds any .cc or .cu (or .c, .cpp, etc) files, it adds those extensions to the ignore list.\n :return: Nothing\n "
exts = {}
for t in allowable_file_types:
exts[t] = False
dir_tree = os.walk('.')
for (root, dirnames, filenames) in dir_tree:
d = os.path.basename(root)
if (d in ignores):
continue
else:
ignore_dir = True
for extension in allowable_file_types:
if fnmatch.filter(filenames, ('*' + extension)):
exts[extension] = True
ignore_dir = False
if ignore_dir:
ignores.append(d)
for extension in iter(exts.keys()):
if (not exts[extension]):
extension_ignores.append(extension) | def trim_directory():
"\n Searches from the working directory down recursively, adding any directories it finds which don't have any\n files with .cpp, .h, .cu, etc extensions in them to the ignore list.\n Also, if it never finds any .cc or .cu (or .c, .cpp, etc) files, it adds those extensions to the ignore list.\n :return: Nothing\n "
exts = {}
for t in allowable_file_types:
exts[t] = False
dir_tree = os.walk('.')
for (root, dirnames, filenames) in dir_tree:
d = os.path.basename(root)
if (d in ignores):
continue
else:
ignore_dir = True
for extension in allowable_file_types:
if fnmatch.filter(filenames, ('*' + extension)):
exts[extension] = True
ignore_dir = False
if ignore_dir:
ignores.append(d)
for extension in iter(exts.keys()):
if (not exts[extension]):
extension_ignores.append(extension)<|docstring|>Searches from the working directory down recursively, adding any directories it finds which don't have any
files with .cpp, .h, .cu, etc extensions in them to the ignore list.
Also, if it never finds any .cc or .cu (or .c, .cpp, etc) files, it adds those extensions to the ignore list.
:return: Nothing<|endoftext|> |
def create_diagram_directory():
    """
    Initializes the directory to put all output diagrams in, called "dot_diagrams".
    Any pre-existing directory of that name is removed first so each run starts clean.

    :return: The path of that directory.
    """
    target = 'dot_diagrams'
    if os.path.isdir(target):
        # Wipe stale output from a previous run.
        shutil.rmtree(target)
    os.makedirs(target)
    # Keep the original string-concatenation form of the returned path.
    return os.getcwd() + os.sep + target
Returns the path of that directory. | Tools/DiagramGenerator/dotGenerator.py | create_diagram_directory | shastrihm/BrainGrid | 0 | python | def create_diagram_directory():
'\n initializes directory to put all output diagrams in, called "dot_diagrams"\n Returns the path of that directory. \n '
dir_name = 'dot_diagrams'
if os.path.isdir(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
return ((os.getcwd() + os.sep) + dir_name) | def create_diagram_directory():
'\n initializes directory to put all output diagrams in, called "dot_diagrams"\n Returns the path of that directory. \n '
dir_name = 'dot_diagrams'
if os.path.isdir(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
return ((os.getcwd() + os.sep) + dir_name)<|docstring|>initializes directory to put all output diagrams in, called "dot_diagrams"
Returns the path of that directory.<|endoftext|> |
156927c957c6c3a05f90d0309589668051466daf8e3703eec0f7c4e332ab1b81 | def output_as(dot_file_name, file_ext, destination):
'\n Runs the command line command "dot -T(file_ext) inp_file.dot > out_file.dot.(file_ext)"\n on all dot files created. In other word, this functions converts the dot files to something\n we can actually make sense of. The output files are all put in a top level folder called\n "dot_diagrams"\n :param dot_file_name: the dot_file name (string) \n :param file_ext: the file extension to output the graphs as, e.g. "png" or "pdf"\n :param destination: the directory to put the output file in, as a string\n '
os.system(((((((('dot -T' + file_ext) + ' ') + dot_file_name) + '.dot > out_') + dot_file_name) + '.dot.') + file_ext))
outfile_name = ((('out_' + dot_file_name) + '.dot.') + file_ext)
cwd = os.getcwd()
shutil.move(((cwd + os.sep) + outfile_name), ((destination + os.sep) + outfile_name)) | Runs the command line command "dot -T(file_ext) inp_file.dot > out_file.dot.(file_ext)"
on all dot files created. In other word, this functions converts the dot files to something
we can actually make sense of. The output files are all put in a top level folder called
"dot_diagrams"
:param dot_file_name: the dot_file name (string)
:param file_ext: the file extension to output the graphs as, e.g. "png" or "pdf"
:param destination: the directory to put the output file in, as a string | Tools/DiagramGenerator/dotGenerator.py | output_as | shastrihm/BrainGrid | 0 | python | def output_as(dot_file_name, file_ext, destination):
'\n Runs the command line command "dot -T(file_ext) inp_file.dot > out_file.dot.(file_ext)"\n on all dot files created. In other word, this functions converts the dot files to something\n we can actually make sense of. The output files are all put in a top level folder called\n "dot_diagrams"\n :param dot_file_name: the dot_file name (string) \n :param file_ext: the file extension to output the graphs as, e.g. "png" or "pdf"\n :param destination: the directory to put the output file in, as a string\n '
os.system(((((((('dot -T' + file_ext) + ' ') + dot_file_name) + '.dot > out_') + dot_file_name) + '.dot.') + file_ext))
outfile_name = ((('out_' + dot_file_name) + '.dot.') + file_ext)
cwd = os.getcwd()
shutil.move(((cwd + os.sep) + outfile_name), ((destination + os.sep) + outfile_name)) | def output_as(dot_file_name, file_ext, destination):
'\n Runs the command line command "dot -T(file_ext) inp_file.dot > out_file.dot.(file_ext)"\n on all dot files created. In other word, this functions converts the dot files to something\n we can actually make sense of. The output files are all put in a top level folder called\n "dot_diagrams"\n :param dot_file_name: the dot_file name (string) \n :param file_ext: the file extension to output the graphs as, e.g. "png" or "pdf"\n :param destination: the directory to put the output file in, as a string\n '
os.system(((((((('dot -T' + file_ext) + ' ') + dot_file_name) + '.dot > out_') + dot_file_name) + '.dot.') + file_ext))
outfile_name = ((('out_' + dot_file_name) + '.dot.') + file_ext)
cwd = os.getcwd()
shutil.move(((cwd + os.sep) + outfile_name), ((destination + os.sep) + outfile_name))<|docstring|>Runs the command line command "dot -T(file_ext) inp_file.dot > out_file.dot.(file_ext)"
on all dot files created. In other word, this functions converts the dot files to something
we can actually make sense of. The output files are all put in a top level folder called
"dot_diagrams"
:param dot_file_name: the dot_file name (string)
:param file_ext: the file extension to output the graphs as, e.g. "png" or "pdf"
:param destination: the directory to put the output file in, as a string<|endoftext|> |
0d208c59a3c0ec6032b185003b3fe75e4ca955a33d83fff591d8c52363a664e2 | def scatter_correlation(df=None, df_eg0pt1=None, df_eg0pt3=None, df_unif=None, n=None, num_sims=None, load_df=True, title=None, df_ts=None, alg_key='TS'):
'\n maybe something like |proportion condition 1 - 0.5| vs. difference in means? Something which captures the imbalance directly\n \n '
df_eg0pt1 = df_eg0pt1
wald_pval_eg0pt1 = ((1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1['wald_type_stat'].dropna()))) * 2)
df_eg0pt1['Wald Rejected'] = (df_eg0pt1['wald_pval'] < 0.05)
wald_pval_eg0pt3 = ((1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3['wald_type_stat'].dropna()))) * 2)
df_eg0pt3['Wald Rejected'] = (df_eg0pt3['wald_pval'] < 0.05)
wald_pval_ts = ((1 - scipy.stats.norm.cdf(np.abs(df_ts['wald_type_stat'].dropna()))) * 2)
df_ts['Wald Rejected'] = (df_ts['wald_pval'] < 0.05)
wald_pval_unif = ((1 - scipy.stats.norm.cdf(np.abs(df_unif['wald_type_stat'].dropna()))) * 2)
df_unif['Wald Rejected'] = (df_unif['wald_pval'] < 0.05)
(fig, ax) = plt.subplots(2, 2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ['n/2', 'n', '2*n', '4*n']
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[(df_eg0pt1['num_steps'] == num_steps)]
df_for_num_steps_eg0pt3 = df_eg0pt3[(df_eg0pt3['num_steps'] == num_steps)]
df_for_num_steps_unif = df_unif[(df_unif['num_steps'] == num_steps)]
df_for_num_steps_ts = df_ts[(df_ts['num_steps'] == num_steps)]
alg_dict = {'TS': df_for_num_steps_ts, 'EG0pt1': df_for_num_steps_eg0pt1, 'EG0pt3': df_for_num_steps_eg0pt3, 'Uniform': df_for_num_steps_unif}
df_list = [alg_dict[alg_key]]
x_label = 'sample_size_1'
y_label = 'mean_{}'
plot_correlation(fig, ax=ax[i], df_list=df_list, x_label=x_label, y_label=y_label, num_steps=num_steps, ax_idx=i)
num_replications = len(df_for_num_steps_eg0pt1)
ax[i].set_xlabel('|Proportion of samples in Condtion 1 - 0.5| For Number of participants = {} = {}'.format(size_vars[i], num_steps))
ax[i].set_ylim(0, 1.02)
ax[i].set_xlim(0, 0.501)
ax[i].set_ylabel('Difference in Arm Mean Estimates |$\\hatp1$ - $\\hatp2$|')
i += 1
fig.suptitle(title)
fig.subplots_adjust(top=0.8)
save_dir_ne = '../simulation_analysis_saves/scatter_correlation/NoEffect/'
save_dir_e = '../simulation_analysis_saves/scatter_correlation/Effect/'
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = (save_dir_ne + '{}.png'.format(title))
save_str_e = (save_dir_e + '{}.png'.format(title))
if ('No Effect' in title):
print('saving to ', save_str_ne)
fig.savefig(save_str_ne, bbox_inches='tight')
elif ('With Effect' in title):
print('saving to ', save_str_e, bbox_inches='tight')
fig.savefig(save_str_e)
plt.clf()
plt.close() | maybe something like |proportion condition 1 - 0.5| vs. difference in means? Something which captures the imbalance directly | PostDiffMixture/simulations_folder/Old/simulation_analysis_scripts/scatter_plot_functions.py | scatter_correlation | SIGKDDanon/SIGKDD2021DeAnonV2 | 0 | python | def scatter_correlation(df=None, df_eg0pt1=None, df_eg0pt3=None, df_unif=None, n=None, num_sims=None, load_df=True, title=None, df_ts=None, alg_key='TS'):
'\n \n \n '
df_eg0pt1 = df_eg0pt1
wald_pval_eg0pt1 = ((1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1['wald_type_stat'].dropna()))) * 2)
df_eg0pt1['Wald Rejected'] = (df_eg0pt1['wald_pval'] < 0.05)
wald_pval_eg0pt3 = ((1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3['wald_type_stat'].dropna()))) * 2)
df_eg0pt3['Wald Rejected'] = (df_eg0pt3['wald_pval'] < 0.05)
wald_pval_ts = ((1 - scipy.stats.norm.cdf(np.abs(df_ts['wald_type_stat'].dropna()))) * 2)
df_ts['Wald Rejected'] = (df_ts['wald_pval'] < 0.05)
wald_pval_unif = ((1 - scipy.stats.norm.cdf(np.abs(df_unif['wald_type_stat'].dropna()))) * 2)
df_unif['Wald Rejected'] = (df_unif['wald_pval'] < 0.05)
(fig, ax) = plt.subplots(2, 2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ['n/2', 'n', '2*n', '4*n']
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[(df_eg0pt1['num_steps'] == num_steps)]
df_for_num_steps_eg0pt3 = df_eg0pt3[(df_eg0pt3['num_steps'] == num_steps)]
df_for_num_steps_unif = df_unif[(df_unif['num_steps'] == num_steps)]
df_for_num_steps_ts = df_ts[(df_ts['num_steps'] == num_steps)]
alg_dict = {'TS': df_for_num_steps_ts, 'EG0pt1': df_for_num_steps_eg0pt1, 'EG0pt3': df_for_num_steps_eg0pt3, 'Uniform': df_for_num_steps_unif}
df_list = [alg_dict[alg_key]]
x_label = 'sample_size_1'
y_label = 'mean_{}'
plot_correlation(fig, ax=ax[i], df_list=df_list, x_label=x_label, y_label=y_label, num_steps=num_steps, ax_idx=i)
num_replications = len(df_for_num_steps_eg0pt1)
ax[i].set_xlabel('|Proportion of samples in Condtion 1 - 0.5| For Number of participants = {} = {}'.format(size_vars[i], num_steps))
ax[i].set_ylim(0, 1.02)
ax[i].set_xlim(0, 0.501)
ax[i].set_ylabel('Difference in Arm Mean Estimates |$\\hatp1$ - $\\hatp2$|')
i += 1
fig.suptitle(title)
fig.subplots_adjust(top=0.8)
save_dir_ne = '../simulation_analysis_saves/scatter_correlation/NoEffect/'
save_dir_e = '../simulation_analysis_saves/scatter_correlation/Effect/'
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = (save_dir_ne + '{}.png'.format(title))
save_str_e = (save_dir_e + '{}.png'.format(title))
if ('No Effect' in title):
print('saving to ', save_str_ne)
fig.savefig(save_str_ne, bbox_inches='tight')
elif ('With Effect' in title):
print('saving to ', save_str_e, bbox_inches='tight')
fig.savefig(save_str_e)
plt.clf()
plt.close() | def scatter_correlation(df=None, df_eg0pt1=None, df_eg0pt3=None, df_unif=None, n=None, num_sims=None, load_df=True, title=None, df_ts=None, alg_key='TS'):
'\n \n \n '
df_eg0pt1 = df_eg0pt1
wald_pval_eg0pt1 = ((1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1['wald_type_stat'].dropna()))) * 2)
df_eg0pt1['Wald Rejected'] = (df_eg0pt1['wald_pval'] < 0.05)
wald_pval_eg0pt3 = ((1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3['wald_type_stat'].dropna()))) * 2)
df_eg0pt3['Wald Rejected'] = (df_eg0pt3['wald_pval'] < 0.05)
wald_pval_ts = ((1 - scipy.stats.norm.cdf(np.abs(df_ts['wald_type_stat'].dropna()))) * 2)
df_ts['Wald Rejected'] = (df_ts['wald_pval'] < 0.05)
wald_pval_unif = ((1 - scipy.stats.norm.cdf(np.abs(df_unif['wald_type_stat'].dropna()))) * 2)
df_unif['Wald Rejected'] = (df_unif['wald_pval'] < 0.05)
(fig, ax) = plt.subplots(2, 2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ['n/2', 'n', '2*n', '4*n']
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[(df_eg0pt1['num_steps'] == num_steps)]
df_for_num_steps_eg0pt3 = df_eg0pt3[(df_eg0pt3['num_steps'] == num_steps)]
df_for_num_steps_unif = df_unif[(df_unif['num_steps'] == num_steps)]
df_for_num_steps_ts = df_ts[(df_ts['num_steps'] == num_steps)]
alg_dict = {'TS': df_for_num_steps_ts, 'EG0pt1': df_for_num_steps_eg0pt1, 'EG0pt3': df_for_num_steps_eg0pt3, 'Uniform': df_for_num_steps_unif}
df_list = [alg_dict[alg_key]]
x_label = 'sample_size_1'
y_label = 'mean_{}'
plot_correlation(fig, ax=ax[i], df_list=df_list, x_label=x_label, y_label=y_label, num_steps=num_steps, ax_idx=i)
num_replications = len(df_for_num_steps_eg0pt1)
ax[i].set_xlabel('|Proportion of samples in Condtion 1 - 0.5| For Number of participants = {} = {}'.format(size_vars[i], num_steps))
ax[i].set_ylim(0, 1.02)
ax[i].set_xlim(0, 0.501)
ax[i].set_ylabel('Difference in Arm Mean Estimates |$\\hatp1$ - $\\hatp2$|')
i += 1
fig.suptitle(title)
fig.subplots_adjust(top=0.8)
save_dir_ne = '../simulation_analysis_saves/scatter_correlation/NoEffect/'
save_dir_e = '../simulation_analysis_saves/scatter_correlation/Effect/'
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = (save_dir_ne + '{}.png'.format(title))
save_str_e = (save_dir_e + '{}.png'.format(title))
if ('No Effect' in title):
print('saving to ', save_str_ne)
fig.savefig(save_str_ne, bbox_inches='tight')
elif ('With Effect' in title):
print('saving to ', save_str_e, bbox_inches='tight')
fig.savefig(save_str_e)
plt.clf()
plt.close()<|docstring|>maybe something like |proportion condition 1 - 0.5| vs. difference in means? Something which captures the imbalance directly<|endoftext|> |
8031566eccc54f7e35b79328dcabc64beb6fc0effa57080caac1f8621f188196 | def loads(text):
'\n Parses TOML text into a dict-like object and returns it.\n '
tokens = tuple(lexer(text, is_top_level=True))
elements = parse_tokens(tokens)
return TOMLFile(elements) | Parses TOML text into a dict-like object and returns it. | poetry/toml/__init__.py | loads | markovendelin/poetry | 0 | python | def loads(text):
'\n \n '
tokens = tuple(lexer(text, is_top_level=True))
elements = parse_tokens(tokens)
return TOMLFile(elements) | def loads(text):
'\n \n '
tokens = tuple(lexer(text, is_top_level=True))
elements = parse_tokens(tokens)
return TOMLFile(elements)<|docstring|>Parses TOML text into a dict-like object and returns it.<|endoftext|> |
40277d260b26769981334cf3e189e1776a24996729d947b79da7ad2dcaeda854 | def load(file_path):
'\n Parses a TOML file into a dict-like object and returns it.\n '
with open(file_path) as fd:
return loads(fd.read()) | Parses a TOML file into a dict-like object and returns it. | poetry/toml/__init__.py | load | markovendelin/poetry | 0 | python | def load(file_path):
'\n \n '
with open(file_path) as fd:
return loads(fd.read()) | def load(file_path):
'\n \n '
with open(file_path) as fd:
return loads(fd.read())<|docstring|>Parses a TOML file into a dict-like object and returns it.<|endoftext|> |
743c3854cab40f00ee73f7bce1d28e4c6944f285d3a961f51b8aff15dd902527 | def dumps(value):
'\n Dumps a data structure to TOML source code.\n\n The given value must be either a dict of dict values, a dict,\n or a TOML file constructed by this module.\n '
if (not isinstance(value, TOMLFile)):
raise RuntimeError('Can only dump a TOMLFile instance loaded by load() or loads()')
return value.dumps() | Dumps a data structure to TOML source code.
The given value must be either a dict of dict values, a dict,
or a TOML file constructed by this module. | poetry/toml/__init__.py | dumps | markovendelin/poetry | 0 | python | def dumps(value):
'\n Dumps a data structure to TOML source code.\n\n The given value must be either a dict of dict values, a dict,\n or a TOML file constructed by this module.\n '
if (not isinstance(value, TOMLFile)):
raise RuntimeError('Can only dump a TOMLFile instance loaded by load() or loads()')
return value.dumps() | def dumps(value):
'\n Dumps a data structure to TOML source code.\n\n The given value must be either a dict of dict values, a dict,\n or a TOML file constructed by this module.\n '
if (not isinstance(value, TOMLFile)):
raise RuntimeError('Can only dump a TOMLFile instance loaded by load() or loads()')
return value.dumps()<|docstring|>Dumps a data structure to TOML source code.
The given value must be either a dict of dict values, a dict,
or a TOML file constructed by this module.<|endoftext|> |
04e3b1b2b911994058b4b3853776e80dd3a9769b0db1cb3ee50867f97105d6a8 | def dump(obj, file_path, prettify=False):
'\n Dumps a data structure to the filesystem as TOML.\n\n The given value must be either a dict of dict values, a dict,\n or a TOML file constructed by this module.\n '
with open(file_path, 'w') as fp:
fp.write(dumps(obj)) | Dumps a data structure to the filesystem as TOML.
The given value must be either a dict of dict values, a dict,
or a TOML file constructed by this module. | poetry/toml/__init__.py | dump | markovendelin/poetry | 0 | python | def dump(obj, file_path, prettify=False):
'\n Dumps a data structure to the filesystem as TOML.\n\n The given value must be either a dict of dict values, a dict,\n or a TOML file constructed by this module.\n '
with open(file_path, 'w') as fp:
fp.write(dumps(obj)) | def dump(obj, file_path, prettify=False):
'\n Dumps a data structure to the filesystem as TOML.\n\n The given value must be either a dict of dict values, a dict,\n or a TOML file constructed by this module.\n '
with open(file_path, 'w') as fp:
fp.write(dumps(obj))<|docstring|>Dumps a data structure to the filesystem as TOML.
The given value must be either a dict of dict values, a dict,
or a TOML file constructed by this module.<|endoftext|> |
534c1e680887acff74d7cffbadc3141f12d37b50ee001ea2ec41e50e2f5b0669 | def _load_class(classname):
'Load a class from a string'
(module_name, class_name) = classname.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, class_name) | Load a class from a string | explorers/tools.py | _load_class | humm/explorers | 0 | python | def _load_class(classname):
(module_name, class_name) = classname.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, class_name) | def _load_class(classname):
(module_name, class_name) = classname.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, class_name)<|docstring|>Load a class from a string<|endoftext|> |
af693b41baa1bab28953e22d870851463c622fd766bd98796e1e828454587ae6 | def to_vector(signal, channels=None):
'Convert a signal to a vector'
if (channels is None):
assert isinstance(signal, collections.OrderedDict)
return tuple(signal.values())
else:
return tuple((signal[c.name] for c in channels)) | Convert a signal to a vector | explorers/tools.py | to_vector | humm/explorers | 0 | python | def to_vector(signal, channels=None):
if (channels is None):
assert isinstance(signal, collections.OrderedDict)
return tuple(signal.values())
else:
return tuple((signal[c.name] for c in channels)) | def to_vector(signal, channels=None):
if (channels is None):
assert isinstance(signal, collections.OrderedDict)
return tuple(signal.values())
else:
return tuple((signal[c.name] for c in channels))<|docstring|>Convert a signal to a vector<|endoftext|> |
aa14cf44df84f368622559eb9f55e718aa05abe414b4d0d713860509764ca219 | def to_signal(vector, channels):
'Convert a vector to a signal'
assert (len(vector) == len(channels))
return {c_i.name: v_i for (c_i, v_i) in zip(channels, vector)} | Convert a vector to a signal | explorers/tools.py | to_signal | humm/explorers | 0 | python | def to_signal(vector, channels):
assert (len(vector) == len(channels))
return {c_i.name: v_i for (c_i, v_i) in zip(channels, vector)} | def to_signal(vector, channels):
assert (len(vector) == len(channels))
return {c_i.name: v_i for (c_i, v_i) in zip(channels, vector)}<|docstring|>Convert a vector to a signal<|endoftext|> |
3a5cf963e9a946f9543185c7664d82ea3a86cf3feb460bb1bfb75612eeca48f2 | def merge_signals(signal_a, signal_b):
'\n Merge signal_a and signal_b into a single signal.\n The two signal must have non-overlapping channels.\n '
signal = copy.copy(signal_a)
for (c, v) in signal_b.items():
assert (c not in signal)
signal[c] = v
return signal | Merge signal_a and signal_b into a single signal.
The two signal must have non-overlapping channels. | explorers/tools.py | merge_signals | humm/explorers | 0 | python | def merge_signals(signal_a, signal_b):
'\n Merge signal_a and signal_b into a single signal.\n The two signal must have non-overlapping channels.\n '
signal = copy.copy(signal_a)
for (c, v) in signal_b.items():
assert (c not in signal)
signal[c] = v
return signal | def merge_signals(signal_a, signal_b):
'\n Merge signal_a and signal_b into a single signal.\n The two signal must have non-overlapping channels.\n '
signal = copy.copy(signal_a)
for (c, v) in signal_b.items():
assert (c not in signal)
signal[c] = v
return signal<|docstring|>Merge signal_a and signal_b into a single signal.
The two signal must have non-overlapping channels.<|endoftext|> |
24ac5b6ee4f289d197a2dae6ad17145946608f9884accb27cc5fb90e64ab1881 | def roulette_wheel(proba):
'Given a vector p, return index i with probability p_i/sum(p).\n Elements of p are positive numbers.\n @param proba list of positive numbers\n '
assert (len(proba) >= 1)
sum_proba = sum(proba)
dice = random.uniform(0.0, sum_proba)
if (sum_proba == 0.0):
return random.randint(0, (len(proba) - 1))
(s, i) = (proba[0], 0)
while ((i < (len(proba) - 1)) and (dice >= s)):
i += 1
assert (proba[i] >= 0), 'all elements are not positive {}'.format(proba)
s += proba[i]
return i | Given a vector p, return index i with probability p_i/sum(p).
Elements of p are positive numbers.
@param proba list of positive numbers | explorers/tools.py | roulette_wheel | humm/explorers | 0 | python | def roulette_wheel(proba):
'Given a vector p, return index i with probability p_i/sum(p).\n Elements of p are positive numbers.\n @param proba list of positive numbers\n '
assert (len(proba) >= 1)
sum_proba = sum(proba)
dice = random.uniform(0.0, sum_proba)
if (sum_proba == 0.0):
return random.randint(0, (len(proba) - 1))
(s, i) = (proba[0], 0)
while ((i < (len(proba) - 1)) and (dice >= s)):
i += 1
assert (proba[i] >= 0), 'all elements are not positive {}'.format(proba)
s += proba[i]
return i | def roulette_wheel(proba):
'Given a vector p, return index i with probability p_i/sum(p).\n Elements of p are positive numbers.\n @param proba list of positive numbers\n '
assert (len(proba) >= 1)
sum_proba = sum(proba)
dice = random.uniform(0.0, sum_proba)
if (sum_proba == 0.0):
return random.randint(0, (len(proba) - 1))
(s, i) = (proba[0], 0)
while ((i < (len(proba) - 1)) and (dice >= s)):
i += 1
assert (proba[i] >= 0), 'all elements are not positive {}'.format(proba)
s += proba[i]
return i<|docstring|>Given a vector p, return index i with probability p_i/sum(p).
Elements of p are positive numbers.
@param proba list of positive numbers<|endoftext|> |
efe6e3448d1da3f7be606a6f66025f2033ccce3cb577e321b84011c32e1fcac8 | def is_string(obj):
'\n Is the given object a string?\n '
return isinstance(obj, string_types) | Is the given object a string? | python/replicate/_vendor/colors/colors.py | is_string | hemildesai/replicate | 810 | python | def is_string(obj):
'\n \n '
return isinstance(obj, string_types) | def is_string(obj):
'\n \n '
return isinstance(obj, string_types)<|docstring|>Is the given object a string?<|endoftext|> |
36fd67d516aafb141a52bb30bc694adabad6fb8ad7962f7397402120ae6f2191 | def _join(*values):
'\n Join a series of values with semicolons. The values\n are either integers or strings, so stringify each for\n good measure. Worth breaking out as its own function\n because semicolon-joined lists are core to ANSI coding.\n '
return ';'.join((str(v) for v in values)) | Join a series of values with semicolons. The values
are either integers or strings, so stringify each for
good measure. Worth breaking out as its own function
because semicolon-joined lists are core to ANSI coding. | python/replicate/_vendor/colors/colors.py | _join | hemildesai/replicate | 810 | python | def _join(*values):
'\n Join a series of values with semicolons. The values\n are either integers or strings, so stringify each for\n good measure. Worth breaking out as its own function\n because semicolon-joined lists are core to ANSI coding.\n '
return ';'.join((str(v) for v in values)) | def _join(*values):
'\n Join a series of values with semicolons. The values\n are either integers or strings, so stringify each for\n good measure. Worth breaking out as its own function\n because semicolon-joined lists are core to ANSI coding.\n '
return ';'.join((str(v) for v in values))<|docstring|>Join a series of values with semicolons. The values
are either integers or strings, so stringify each for
good measure. Worth breaking out as its own function
because semicolon-joined lists are core to ANSI coding.<|endoftext|> |
24194e165c8afd9ca96c9755891f96c064da43647b2192eb5e392c56a47d6c71 | def _color_code(spec, base):
"\n Workhorse of encoding a color. Give preference to named colors from\n ANSI, then to specific numeric or tuple specs. If those don't work,\n try looking up look CSS color names or parsing CSS hex and rgb color\n specifications.\n\n :param str|int|tuple|list spec: Unparsed color specification\n :param int base: Either 30 or 40, signifying the base value\n for color encoding (foreground and background respectively).\n Low values are added directly to the base. Higher values use `\n base + 8` (i.e. 38 or 48) then extended codes.\n :returns: Discovered ANSI color encoding.\n :rtype: str\n :raises: ValueError if cannot parse the color spec.\n "
if is_string(spec):
spec = spec.strip().lower()
if (spec == 'default'):
return _join((base + 9))
elif (spec in COLORS):
return _join((base + COLORS.index(spec)))
elif (isinstance(spec, int) and (0 <= spec <= 255)):
return _join((base + 8), 5, spec)
elif isinstance(spec, (tuple, list)):
return _join((base + 8), 2, _join(*spec))
else:
rgb = parse_rgb(spec)
return _join((base + 8), 2, _join(*rgb)) | Workhorse of encoding a color. Give preference to named colors from
ANSI, then to specific numeric or tuple specs. If those don't work,
try looking up look CSS color names or parsing CSS hex and rgb color
specifications.
:param str|int|tuple|list spec: Unparsed color specification
:param int base: Either 30 or 40, signifying the base value
for color encoding (foreground and background respectively).
Low values are added directly to the base. Higher values use `
base + 8` (i.e. 38 or 48) then extended codes.
:returns: Discovered ANSI color encoding.
:rtype: str
:raises: ValueError if cannot parse the color spec. | python/replicate/_vendor/colors/colors.py | _color_code | hemildesai/replicate | 810 | python | def _color_code(spec, base):
"\n Workhorse of encoding a color. Give preference to named colors from\n ANSI, then to specific numeric or tuple specs. If those don't work,\n try looking up look CSS color names or parsing CSS hex and rgb color\n specifications.\n\n :param str|int|tuple|list spec: Unparsed color specification\n :param int base: Either 30 or 40, signifying the base value\n for color encoding (foreground and background respectively).\n Low values are added directly to the base. Higher values use `\n base + 8` (i.e. 38 or 48) then extended codes.\n :returns: Discovered ANSI color encoding.\n :rtype: str\n :raises: ValueError if cannot parse the color spec.\n "
if is_string(spec):
spec = spec.strip().lower()
if (spec == 'default'):
return _join((base + 9))
elif (spec in COLORS):
return _join((base + COLORS.index(spec)))
elif (isinstance(spec, int) and (0 <= spec <= 255)):
return _join((base + 8), 5, spec)
elif isinstance(spec, (tuple, list)):
return _join((base + 8), 2, _join(*spec))
else:
rgb = parse_rgb(spec)
return _join((base + 8), 2, _join(*rgb)) | def _color_code(spec, base):
"\n Workhorse of encoding a color. Give preference to named colors from\n ANSI, then to specific numeric or tuple specs. If those don't work,\n try looking up look CSS color names or parsing CSS hex and rgb color\n specifications.\n\n :param str|int|tuple|list spec: Unparsed color specification\n :param int base: Either 30 or 40, signifying the base value\n for color encoding (foreground and background respectively).\n Low values are added directly to the base. Higher values use `\n base + 8` (i.e. 38 or 48) then extended codes.\n :returns: Discovered ANSI color encoding.\n :rtype: str\n :raises: ValueError if cannot parse the color spec.\n "
if is_string(spec):
spec = spec.strip().lower()
if (spec == 'default'):
return _join((base + 9))
elif (spec in COLORS):
return _join((base + COLORS.index(spec)))
elif (isinstance(spec, int) and (0 <= spec <= 255)):
return _join((base + 8), 5, spec)
elif isinstance(spec, (tuple, list)):
return _join((base + 8), 2, _join(*spec))
else:
rgb = parse_rgb(spec)
return _join((base + 8), 2, _join(*rgb))<|docstring|>Workhorse of encoding a color. Give preference to named colors from
ANSI, then to specific numeric or tuple specs. If those don't work,
try looking up look CSS color names or parsing CSS hex and rgb color
specifications.
:param str|int|tuple|list spec: Unparsed color specification
:param int base: Either 30 or 40, signifying the base value
for color encoding (foreground and background respectively).
Low values are added directly to the base. Higher values use `
base + 8` (i.e. 38 or 48) then extended codes.
:returns: Discovered ANSI color encoding.
:rtype: str
:raises: ValueError if cannot parse the color spec.<|endoftext|> |
cc0587117dee9d2753ce29151661f7f12da80b03f62e276c42e910f392cf2ed8 | def color(s, fg=None, bg=None, style=None):
"\n Add ANSI colors and styles to a string.\n\n :param str s: String to format.\n :param str|int|tuple fg: Foreground color specification.\n :param str|int|tuple bg: Background color specification.\n :param str: Style names, separated by '+'\n :returns: Formatted string.\n :rtype: str (or unicode in Python 2, if s is unicode)\n "
codes = []
if fg:
codes.append(_color_code(fg, 30))
if bg:
codes.append(_color_code(bg, 40))
if style:
for style_part in style.split('+'):
if (style_part in STYLES):
codes.append(STYLES.index(style_part))
else:
raise ValueError(('Invalid style "%s"' % style_part))
if codes:
template = '\x1b[{0}m{1}\x1b[0m'
if (_PY2 and isinstance(s, unicode)):
template = unicode(template)
return template.format(_join(*codes), s)
else:
return s | Add ANSI colors and styles to a string.
:param str s: String to format.
:param str|int|tuple fg: Foreground color specification.
:param str|int|tuple bg: Background color specification.
:param str: Style names, separated by '+'
:returns: Formatted string.
:rtype: str (or unicode in Python 2, if s is unicode) | python/replicate/_vendor/colors/colors.py | color | hemildesai/replicate | 810 | python | def color(s, fg=None, bg=None, style=None):
"\n Add ANSI colors and styles to a string.\n\n :param str s: String to format.\n :param str|int|tuple fg: Foreground color specification.\n :param str|int|tuple bg: Background color specification.\n :param str: Style names, separated by '+'\n :returns: Formatted string.\n :rtype: str (or unicode in Python 2, if s is unicode)\n "
codes = []
if fg:
codes.append(_color_code(fg, 30))
if bg:
codes.append(_color_code(bg, 40))
if style:
for style_part in style.split('+'):
if (style_part in STYLES):
codes.append(STYLES.index(style_part))
else:
raise ValueError(('Invalid style "%s"' % style_part))
if codes:
template = '\x1b[{0}m{1}\x1b[0m'
if (_PY2 and isinstance(s, unicode)):
template = unicode(template)
return template.format(_join(*codes), s)
else:
return s | def color(s, fg=None, bg=None, style=None):
"\n Add ANSI colors and styles to a string.\n\n :param str s: String to format.\n :param str|int|tuple fg: Foreground color specification.\n :param str|int|tuple bg: Background color specification.\n :param str: Style names, separated by '+'\n :returns: Formatted string.\n :rtype: str (or unicode in Python 2, if s is unicode)\n "
codes = []
if fg:
codes.append(_color_code(fg, 30))
if bg:
codes.append(_color_code(bg, 40))
if style:
for style_part in style.split('+'):
if (style_part in STYLES):
codes.append(STYLES.index(style_part))
else:
raise ValueError(('Invalid style "%s"' % style_part))
if codes:
template = '\x1b[{0}m{1}\x1b[0m'
if (_PY2 and isinstance(s, unicode)):
template = unicode(template)
return template.format(_join(*codes), s)
else:
return s<|docstring|>Add ANSI colors and styles to a string.
:param str s: String to format.
:param str|int|tuple fg: Foreground color specification.
:param str|int|tuple bg: Background color specification.
:param str: Style names, separated by '+'
:returns: Formatted string.
:rtype: str (or unicode in Python 2, if s is unicode)<|endoftext|> |
5f9c8e0eba5bc0eb392bc46f543c81bc60804daf8a25ab4d7e878aa97b88f3fb | def strip_color(s):
'\n Remove ANSI color/style sequences from a string. The set of all\n possibly ANSI sequences is large, so does not try to strip every\n possible one. But does strip some outliers seen not just in text\n generated by this module, but by other ANSI colorizers in the wild.\n Those include `\x1b[K` (aka EL or erase to end of line) and `\x1b[m`\n a terse version of the more common `\x1b[0m`.\n '
return re.sub('\x1b\\[(K|.*?m)', '', s) | Remove ANSI color/style sequences from a string. The set of all
possibly ANSI sequences is large, so does not try to strip every
possible one. But does strip some outliers seen not just in text
generated by this module, but by other ANSI colorizers in the wild.
Those include `[K` (aka EL or erase to end of line) and `[m`
a terse version of the more common `[0m`. | python/replicate/_vendor/colors/colors.py | strip_color | hemildesai/replicate | 810 | python | def strip_color(s):
'\n Remove ANSI color/style sequences from a string. The set of all\n possibly ANSI sequences is large, so does not try to strip every\n possible one. But does strip some outliers seen not just in text\n generated by this module, but by other ANSI colorizers in the wild.\n Those include `\x1b[K` (aka EL or erase to end of line) and `\x1b[m`\n a terse version of the more common `\x1b[0m`.\n '
return re.sub('\x1b\\[(K|.*?m)', , s) | def strip_color(s):
'\n Remove ANSI color/style sequences from a string. The set of all\n possibly ANSI sequences is large, so does not try to strip every\n possible one. But does strip some outliers seen not just in text\n generated by this module, but by other ANSI colorizers in the wild.\n Those include `\x1b[K` (aka EL or erase to end of line) and `\x1b[m`\n a terse version of the more common `\x1b[0m`.\n '
return re.sub('\x1b\\[(K|.*?m)', , s)<|docstring|>Remove ANSI color/style sequences from a string. The set of all
possibly ANSI sequences is large, so does not try to strip every
possible one. But does strip some outliers seen not just in text
generated by this module, but by other ANSI colorizers in the wild.
Those include `[K` (aka EL or erase to end of line) and `[m`
a terse version of the more common `[0m`.<|endoftext|> |
368f02d90da5e55bff2b8fad6cad0cb2a9645285308070432d776db34d07bce2 | def ansilen(s):
'\n Given a string with embedded ANSI codes, what would its\n length be without those codes?\n '
return len(strip_color(s)) | Given a string with embedded ANSI codes, what would its
length be without those codes? | python/replicate/_vendor/colors/colors.py | ansilen | hemildesai/replicate | 810 | python | def ansilen(s):
'\n Given a string with embedded ANSI codes, what would its\n length be without those codes?\n '
return len(strip_color(s)) | def ansilen(s):
'\n Given a string with embedded ANSI codes, what would its\n length be without those codes?\n '
return len(strip_color(s))<|docstring|>Given a string with embedded ANSI codes, what would its
length be without those codes?<|endoftext|> |
808be98bab91557124405131b4be7aeae57461fe915e1f3d2c41af6b8c9f36b3 | @root_validator(skip_on_failure=True)
def set_service_path(cls, values: Dict[(str, Any)]) -> Dict[(str, Any)]:
'Sets the service_path attribute value according to the component\n UUID.'
if values.get('service_path'):
return values
assert ('uuid' in values)
values['service_path'] = cls.get_service_path(values['uuid'])
return values | Sets the service_path attribute value according to the component
UUID. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | set_service_path | safoinme/zenml | 0 | python | @root_validator(skip_on_failure=True)
def set_service_path(cls, values: Dict[(str, Any)]) -> Dict[(str, Any)]:
'Sets the service_path attribute value according to the component\n UUID.'
if values.get('service_path'):
return values
assert ('uuid' in values)
values['service_path'] = cls.get_service_path(values['uuid'])
return values | @root_validator(skip_on_failure=True)
def set_service_path(cls, values: Dict[(str, Any)]) -> Dict[(str, Any)]:
'Sets the service_path attribute value according to the component\n UUID.'
if values.get('service_path'):
return values
assert ('uuid' in values)
values['service_path'] = cls.get_service_path(values['uuid'])
return values<|docstring|>Sets the service_path attribute value according to the component
UUID.<|endoftext|> |
0f641a32fcf126383cdce9fc49417a7382adbaebf7eda03c2edb06a43c6ce873 | @staticmethod
def get_service_path(uuid: uuid.UUID) -> str:
'Get the path the path where the local MLflow deployment service\n configuration, PID and log files are stored.\n\n Args:\n uuid: The UUID of the MLflow model deployer.\n\n Returns:\n The service path.\n '
service_path = os.path.join(get_global_config_directory(), LOCAL_STORES_DIRECTORY_NAME, str(uuid))
create_dir_recursive_if_not_exists(service_path)
return service_path | Get the path the path where the local MLflow deployment service
configuration, PID and log files are stored.
Args:
uuid: The UUID of the MLflow model deployer.
Returns:
The service path. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | get_service_path | safoinme/zenml | 0 | python | @staticmethod
def get_service_path(uuid: uuid.UUID) -> str:
'Get the path the path where the local MLflow deployment service\n configuration, PID and log files are stored.\n\n Args:\n uuid: The UUID of the MLflow model deployer.\n\n Returns:\n The service path.\n '
service_path = os.path.join(get_global_config_directory(), LOCAL_STORES_DIRECTORY_NAME, str(uuid))
create_dir_recursive_if_not_exists(service_path)
return service_path | @staticmethod
def get_service_path(uuid: uuid.UUID) -> str:
'Get the path the path where the local MLflow deployment service\n configuration, PID and log files are stored.\n\n Args:\n uuid: The UUID of the MLflow model deployer.\n\n Returns:\n The service path.\n '
service_path = os.path.join(get_global_config_directory(), LOCAL_STORES_DIRECTORY_NAME, str(uuid))
create_dir_recursive_if_not_exists(service_path)
return service_path<|docstring|>Get the path the path where the local MLflow deployment service
configuration, PID and log files are stored.
Args:
uuid: The UUID of the MLflow model deployer.
Returns:
The service path.<|endoftext|> |
cd176d3eb1f0840964adcbb240285b4955fff4df2afab8790bc627f727a9aefd | @property
def local_path(self) -> str:
'\n Returns the path to the root directory where all configurations for\n MLflow deployment daemon processes are stored.\n\n Returns:\n The path to the local service root directory.\n '
return self.service_path | Returns the path to the root directory where all configurations for
MLflow deployment daemon processes are stored.
Returns:
The path to the local service root directory. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | local_path | safoinme/zenml | 0 | python | @property
def local_path(self) -> str:
'\n Returns the path to the root directory where all configurations for\n MLflow deployment daemon processes are stored.\n\n Returns:\n The path to the local service root directory.\n '
return self.service_path | @property
def local_path(self) -> str:
'\n Returns the path to the root directory where all configurations for\n MLflow deployment daemon processes are stored.\n\n Returns:\n The path to the local service root directory.\n '
return self.service_path<|docstring|>Returns the path to the root directory where all configurations for
MLflow deployment daemon processes are stored.
Returns:
The path to the local service root directory.<|endoftext|> |
39d294776e32fd6ee9d953abfd1b98712d29b15cd77bc33050302bc97f1d64a4 | @staticmethod
def get_model_server_info(service_instance: 'MLFlowDeploymentService') -> Dict[(str, Optional[str])]:
'Return implementation specific information that might be relevant\n to the user.\n\n Args:\n service_instance: Instance of a SeldonDeploymentService\n '
return {'PREDICTION_URL': service_instance.endpoint.prediction_url, 'MODEL_URI': service_instance.config.model_uri, 'MODEL_NAME': service_instance.config.model_name, 'SERVICE_PATH': service_instance.status.runtime_path, 'DAEMON_PID': str(service_instance.status.pid)} | Return implementation specific information that might be relevant
to the user.
Args:
service_instance: Instance of a SeldonDeploymentService | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | get_model_server_info | safoinme/zenml | 0 | python | @staticmethod
def get_model_server_info(service_instance: 'MLFlowDeploymentService') -> Dict[(str, Optional[str])]:
'Return implementation specific information that might be relevant\n to the user.\n\n Args:\n service_instance: Instance of a SeldonDeploymentService\n '
return {'PREDICTION_URL': service_instance.endpoint.prediction_url, 'MODEL_URI': service_instance.config.model_uri, 'MODEL_NAME': service_instance.config.model_name, 'SERVICE_PATH': service_instance.status.runtime_path, 'DAEMON_PID': str(service_instance.status.pid)} | @staticmethod
def get_model_server_info(service_instance: 'MLFlowDeploymentService') -> Dict[(str, Optional[str])]:
'Return implementation specific information that might be relevant\n to the user.\n\n Args:\n service_instance: Instance of a SeldonDeploymentService\n '
return {'PREDICTION_URL': service_instance.endpoint.prediction_url, 'MODEL_URI': service_instance.config.model_uri, 'MODEL_NAME': service_instance.config.model_name, 'SERVICE_PATH': service_instance.status.runtime_path, 'DAEMON_PID': str(service_instance.status.pid)}<|docstring|>Return implementation specific information that might be relevant
to the user.
Args:
service_instance: Instance of a SeldonDeploymentService<|endoftext|> |
5ac4bc304c08e934b445553fe60d149febf8a13cba220d72b4f481d4d3e7a263 | @staticmethod
def get_active_model_deployer() -> 'MLFlowModelDeployer':
'\n Returns the MLFlowModelDeployer component of the active stack.\n\n Args:\n None\n\n Returns:\n The MLFlowModelDeployer component of the active stack.\n '
model_deployer = Repository(skip_repository_check=True).active_stack.model_deployer
if ((not model_deployer) or (not isinstance(model_deployer, MLFlowModelDeployer))):
raise TypeError(f'''The active stack needs to have an MLflow model deployer component registered to be able to deploy models with MLflow. You can create a new stack with an MLflow model deployer component or update your existing stack to add this component, e.g.:
'zenml model-deployer register mlflow --flavor={MLFLOW_MODEL_DEPLOYER_FLAVOR}'
'zenml stack create stack-name -d mlflow ...'
''')
return model_deployer | Returns the MLFlowModelDeployer component of the active stack.
Args:
None
Returns:
The MLFlowModelDeployer component of the active stack. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | get_active_model_deployer | safoinme/zenml | 0 | python | @staticmethod
def get_active_model_deployer() -> 'MLFlowModelDeployer':
'\n Returns the MLFlowModelDeployer component of the active stack.\n\n Args:\n None\n\n Returns:\n The MLFlowModelDeployer component of the active stack.\n '
model_deployer = Repository(skip_repository_check=True).active_stack.model_deployer
if ((not model_deployer) or (not isinstance(model_deployer, MLFlowModelDeployer))):
raise TypeError(f'The active stack needs to have an MLflow model deployer component registered to be able to deploy models with MLflow. You can create a new stack with an MLflow model deployer component or update your existing stack to add this component, e.g.:
'zenml model-deployer register mlflow --flavor={MLFLOW_MODEL_DEPLOYER_FLAVOR}'
'zenml stack create stack-name -d mlflow ...'
')
return model_deployer | @staticmethod
def get_active_model_deployer() -> 'MLFlowModelDeployer':
'\n Returns the MLFlowModelDeployer component of the active stack.\n\n Args:\n None\n\n Returns:\n The MLFlowModelDeployer component of the active stack.\n '
model_deployer = Repository(skip_repository_check=True).active_stack.model_deployer
if ((not model_deployer) or (not isinstance(model_deployer, MLFlowModelDeployer))):
raise TypeError(f'The active stack needs to have an MLflow model deployer component registered to be able to deploy models with MLflow. You can create a new stack with an MLflow model deployer component or update your existing stack to add this component, e.g.:
'zenml model-deployer register mlflow --flavor={MLFLOW_MODEL_DEPLOYER_FLAVOR}'
'zenml stack create stack-name -d mlflow ...'
')
return model_deployer<|docstring|>Returns the MLFlowModelDeployer component of the active stack.
Args:
None
Returns:
The MLFlowModelDeployer component of the active stack.<|endoftext|> |
c502866bf685428e86d24451b5476f9f7b3ed7104d2585573ea4c7e8213259b1 | def deploy_model(self, config: ServiceConfig, replace: bool=False, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT) -> BaseService:
"Create a new MLflow deployment service or update an existing one to\n serve the supplied model and deployment configuration.\n\n This method has two modes of operation, depending on the `replace`\n argument value:\n\n * if `replace` is False, calling this method will create a new MLflow\n deployment server to reflect the model and other configuration\n parameters specified in the supplied MLflow service `config`.\n\n * if `replace` is True, this method will first attempt to find an\n existing MLflow deployment service that is *equivalent* to the\n supplied configuration parameters. Two or more MLflow deployment\n services are considered equivalent if they have the same\n `pipeline_name`, `pipeline_step_name` and `model_name` configuration\n parameters. To put it differently, two MLflow deployment services\n are equivalent if they serve versions of the same model deployed by\n the same pipeline step. If an equivalent MLflow deployment is found,\n it will be updated in place to reflect the new configuration\n parameters.\n\n Callers should set `replace` to True if they want a continuous model\n deployment workflow that doesn't spin up a new MLflow deployment\n server for each new model version. If multiple equivalent MLflow\n deployment servers are found, one is selected at random to be updated\n and the others are deleted.\n\n Args:\n config: the configuration of the model to be deployed with MLflow.\n replace: set this flag to True to find and update an equivalent\n MLflow deployment server with the new model instead of\n creating and starting a new deployment server.\n timeout: the timeout in seconds to wait for the MLflow server\n to be provisioned and successfully started or updated. 
If set\n to 0, the method will return immediately after the MLflow\n server is provisioned, without waiting for it to fully start.\n\n Returns:\n The ZenML MLflow deployment service object that can be used to\n interact with the MLflow model server.\n\n Raises:\n RuntimeError: if `timeout` is set to a positive value that is\n exceeded while waiting for the MLflow deployment server\n to start, or if an operational failure is encountered before\n it reaches a ready state.\n "
config = cast(MLFlowDeploymentConfig, config)
service = None
if (replace is True):
existing_services = self.find_model_server(pipeline_name=config.pipeline_name, pipeline_step_name=config.pipeline_step_name, model_name=config.model_name)
for existing_service in existing_services:
if (service is None):
service = cast(MLFlowDeploymentService, existing_service)
try:
self._clean_up_existing_service(existing_service=cast(MLFlowDeploymentService, existing_service), timeout=timeout, force=True)
except RuntimeError:
pass
if service:
logger.info(f'Updating an existing MLflow deployment service: {service}')
config.root_runtime_path = self.local_path
service.stop(timeout=timeout, force=True)
service.update(config)
service.start(timeout=timeout)
else:
service = self._create_new_service(timeout, config)
logger.info(f'Created a new MLflow deployment service: {service}')
return cast(BaseService, service) | Create a new MLflow deployment service or update an existing one to
serve the supplied model and deployment configuration.
This method has two modes of operation, depending on the `replace`
argument value:
* if `replace` is False, calling this method will create a new MLflow
deployment server to reflect the model and other configuration
parameters specified in the supplied MLflow service `config`.
* if `replace` is True, this method will first attempt to find an
existing MLflow deployment service that is *equivalent* to the
supplied configuration parameters. Two or more MLflow deployment
services are considered equivalent if they have the same
`pipeline_name`, `pipeline_step_name` and `model_name` configuration
parameters. To put it differently, two MLflow deployment services
are equivalent if they serve versions of the same model deployed by
the same pipeline step. If an equivalent MLflow deployment is found,
it will be updated in place to reflect the new configuration
parameters.
Callers should set `replace` to True if they want a continuous model
deployment workflow that doesn't spin up a new MLflow deployment
server for each new model version. If multiple equivalent MLflow
deployment servers are found, one is selected at random to be updated
and the others are deleted.
Args:
config: the configuration of the model to be deployed with MLflow.
replace: set this flag to True to find and update an equivalent
MLflow deployment server with the new model instead of
creating and starting a new deployment server.
timeout: the timeout in seconds to wait for the MLflow server
to be provisioned and successfully started or updated. If set
to 0, the method will return immediately after the MLflow
server is provisioned, without waiting for it to fully start.
Returns:
The ZenML MLflow deployment service object that can be used to
interact with the MLflow model server.
Raises:
RuntimeError: if `timeout` is set to a positive value that is
exceeded while waiting for the MLflow deployment server
to start, or if an operational failure is encountered before
it reaches a ready state. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | deploy_model | safoinme/zenml | 0 | python | def deploy_model(self, config: ServiceConfig, replace: bool=False, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT) -> BaseService:
"Create a new MLflow deployment service or update an existing one to\n serve the supplied model and deployment configuration.\n\n This method has two modes of operation, depending on the `replace`\n argument value:\n\n * if `replace` is False, calling this method will create a new MLflow\n deployment server to reflect the model and other configuration\n parameters specified in the supplied MLflow service `config`.\n\n * if `replace` is True, this method will first attempt to find an\n existing MLflow deployment service that is *equivalent* to the\n supplied configuration parameters. Two or more MLflow deployment\n services are considered equivalent if they have the same\n `pipeline_name`, `pipeline_step_name` and `model_name` configuration\n parameters. To put it differently, two MLflow deployment services\n are equivalent if they serve versions of the same model deployed by\n the same pipeline step. If an equivalent MLflow deployment is found,\n it will be updated in place to reflect the new configuration\n parameters.\n\n Callers should set `replace` to True if they want a continuous model\n deployment workflow that doesn't spin up a new MLflow deployment\n server for each new model version. If multiple equivalent MLflow\n deployment servers are found, one is selected at random to be updated\n and the others are deleted.\n\n Args:\n config: the configuration of the model to be deployed with MLflow.\n replace: set this flag to True to find and update an equivalent\n MLflow deployment server with the new model instead of\n creating and starting a new deployment server.\n timeout: the timeout in seconds to wait for the MLflow server\n to be provisioned and successfully started or updated. 
If set\n to 0, the method will return immediately after the MLflow\n server is provisioned, without waiting for it to fully start.\n\n Returns:\n The ZenML MLflow deployment service object that can be used to\n interact with the MLflow model server.\n\n Raises:\n RuntimeError: if `timeout` is set to a positive value that is\n exceeded while waiting for the MLflow deployment server\n to start, or if an operational failure is encountered before\n it reaches a ready state.\n "
config = cast(MLFlowDeploymentConfig, config)
service = None
if (replace is True):
existing_services = self.find_model_server(pipeline_name=config.pipeline_name, pipeline_step_name=config.pipeline_step_name, model_name=config.model_name)
for existing_service in existing_services:
if (service is None):
service = cast(MLFlowDeploymentService, existing_service)
try:
self._clean_up_existing_service(existing_service=cast(MLFlowDeploymentService, existing_service), timeout=timeout, force=True)
except RuntimeError:
pass
if service:
logger.info(f'Updating an existing MLflow deployment service: {service}')
config.root_runtime_path = self.local_path
service.stop(timeout=timeout, force=True)
service.update(config)
service.start(timeout=timeout)
else:
service = self._create_new_service(timeout, config)
logger.info(f'Created a new MLflow deployment service: {service}')
return cast(BaseService, service) | def deploy_model(self, config: ServiceConfig, replace: bool=False, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT) -> BaseService:
"Create a new MLflow deployment service or update an existing one to\n serve the supplied model and deployment configuration.\n\n This method has two modes of operation, depending on the `replace`\n argument value:\n\n * if `replace` is False, calling this method will create a new MLflow\n deployment server to reflect the model and other configuration\n parameters specified in the supplied MLflow service `config`.\n\n * if `replace` is True, this method will first attempt to find an\n existing MLflow deployment service that is *equivalent* to the\n supplied configuration parameters. Two or more MLflow deployment\n services are considered equivalent if they have the same\n `pipeline_name`, `pipeline_step_name` and `model_name` configuration\n parameters. To put it differently, two MLflow deployment services\n are equivalent if they serve versions of the same model deployed by\n the same pipeline step. If an equivalent MLflow deployment is found,\n it will be updated in place to reflect the new configuration\n parameters.\n\n Callers should set `replace` to True if they want a continuous model\n deployment workflow that doesn't spin up a new MLflow deployment\n server for each new model version. If multiple equivalent MLflow\n deployment servers are found, one is selected at random to be updated\n and the others are deleted.\n\n Args:\n config: the configuration of the model to be deployed with MLflow.\n replace: set this flag to True to find and update an equivalent\n MLflow deployment server with the new model instead of\n creating and starting a new deployment server.\n timeout: the timeout in seconds to wait for the MLflow server\n to be provisioned and successfully started or updated. 
If set\n to 0, the method will return immediately after the MLflow\n server is provisioned, without waiting for it to fully start.\n\n Returns:\n The ZenML MLflow deployment service object that can be used to\n interact with the MLflow model server.\n\n Raises:\n RuntimeError: if `timeout` is set to a positive value that is\n exceeded while waiting for the MLflow deployment server\n to start, or if an operational failure is encountered before\n it reaches a ready state.\n "
config = cast(MLFlowDeploymentConfig, config)
service = None
if (replace is True):
existing_services = self.find_model_server(pipeline_name=config.pipeline_name, pipeline_step_name=config.pipeline_step_name, model_name=config.model_name)
for existing_service in existing_services:
if (service is None):
service = cast(MLFlowDeploymentService, existing_service)
try:
self._clean_up_existing_service(existing_service=cast(MLFlowDeploymentService, existing_service), timeout=timeout, force=True)
except RuntimeError:
pass
if service:
logger.info(f'Updating an existing MLflow deployment service: {service}')
config.root_runtime_path = self.local_path
service.stop(timeout=timeout, force=True)
service.update(config)
service.start(timeout=timeout)
else:
service = self._create_new_service(timeout, config)
logger.info(f'Created a new MLflow deployment service: {service}')
return cast(BaseService, service)<|docstring|>Create a new MLflow deployment service or update an existing one to
serve the supplied model and deployment configuration.
This method has two modes of operation, depending on the `replace`
argument value:
* if `replace` is False, calling this method will create a new MLflow
deployment server to reflect the model and other configuration
parameters specified in the supplied MLflow service `config`.
* if `replace` is True, this method will first attempt to find an
existing MLflow deployment service that is *equivalent* to the
supplied configuration parameters. Two or more MLflow deployment
services are considered equivalent if they have the same
`pipeline_name`, `pipeline_step_name` and `model_name` configuration
parameters. To put it differently, two MLflow deployment services
are equivalent if they serve versions of the same model deployed by
the same pipeline step. If an equivalent MLflow deployment is found,
it will be updated in place to reflect the new configuration
parameters.
Callers should set `replace` to True if they want a continuous model
deployment workflow that doesn't spin up a new MLflow deployment
server for each new model version. If multiple equivalent MLflow
deployment servers are found, one is selected at random to be updated
and the others are deleted.
Args:
config: the configuration of the model to be deployed with MLflow.
replace: set this flag to True to find and update an equivalent
MLflow deployment server with the new model instead of
creating and starting a new deployment server.
timeout: the timeout in seconds to wait for the MLflow server
to be provisioned and successfully started or updated. If set
to 0, the method will return immediately after the MLflow
server is provisioned, without waiting for it to fully start.
Returns:
The ZenML MLflow deployment service object that can be used to
interact with the MLflow model server.
Raises:
RuntimeError: if `timeout` is set to a positive value that is
exceeded while waiting for the MLflow deployment server
to start, or if an operational failure is encountered before
it reaches a ready state.<|endoftext|> |
a88936cbe14ea5f109172959cc3ef944b40c45f9002ac73cc3fb123c171760e0 | def _create_new_service(self, timeout: int, config: MLFlowDeploymentConfig) -> MLFlowDeploymentService:
'Creates a new MLFlowDeploymentService.'
config.root_runtime_path = self.local_path
service = MLFlowDeploymentService(config)
service.start(timeout=timeout)
return service | Creates a new MLFlowDeploymentService. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | _create_new_service | safoinme/zenml | 0 | python | def _create_new_service(self, timeout: int, config: MLFlowDeploymentConfig) -> MLFlowDeploymentService:
config.root_runtime_path = self.local_path
service = MLFlowDeploymentService(config)
service.start(timeout=timeout)
return service | def _create_new_service(self, timeout: int, config: MLFlowDeploymentConfig) -> MLFlowDeploymentService:
config.root_runtime_path = self.local_path
service = MLFlowDeploymentService(config)
service.start(timeout=timeout)
return service<|docstring|>Creates a new MLFlowDeploymentService.<|endoftext|> |
c0fcb86276c8d9e861407ac153440492a107d9230e892d69aa08a7206913b843 | def find_model_server(self, running: bool=False, service_uuid: Optional[UUID]=None, pipeline_name: Optional[str]=None, pipeline_run_id: Optional[str]=None, pipeline_step_name: Optional[str]=None, model_name: Optional[str]=None, model_uri: Optional[str]=None, model_type: Optional[str]=None) -> List[BaseService]:
'Method to find one or more model servers that match the\n given criteria.\n\n Args:\n running: If true, only running services will be returned.\n service_uuid: The UUID of the service that was originally used\n to deploy the model.\n pipeline_name: Name of the pipeline that the deployed model was part\n of.\n pipeline_run_id: ID of the pipeline run which the deployed model\n was part of.\n pipeline_step_name: The name of the pipeline model deployment step\n that deployed the model.\n model_name: Name of the deployed model.\n model_uri: URI of the deployed model.\n model_type: Type/format of the deployed model. Not used in this\n MLflow case.\n\n Returns:\n One or more Service objects representing model servers that match\n the input search criteria.\n '
services = []
config = MLFlowDeploymentConfig(model_name=(model_name or ''), model_uri=(model_uri or ''), pipeline_name=(pipeline_name or ''), pipeline_run_id=(pipeline_run_id or ''), pipeline_step_name=(pipeline_step_name or ''))
for (root, _, files) in os.walk(self.local_path):
if (service_uuid and (Path(root).name != str(service_uuid))):
continue
for file in files:
if (file == SERVICE_DAEMON_CONFIG_FILE_NAME):
service_config_path = os.path.join(root, file)
logger.debug('Loading service daemon configuration from %s', service_config_path)
existing_service_config = None
with open(service_config_path, 'r') as f:
existing_service_config = f.read()
existing_service = ServiceRegistry().load_service_from_json(existing_service_config)
if (not isinstance(existing_service, MLFlowDeploymentService)):
raise TypeError(f'Expected service type MLFlowDeploymentService but got {type(existing_service)} instead')
existing_service.update_status()
if self._matches_search_criteria(existing_service, config):
if ((not running) or existing_service.is_running):
services.append(cast(BaseService, existing_service))
return services | Method to find one or more model servers that match the
given criteria.
Args:
running: If true, only running services will be returned.
service_uuid: The UUID of the service that was originally used
to deploy the model.
pipeline_name: Name of the pipeline that the deployed model was part
of.
pipeline_run_id: ID of the pipeline run which the deployed model
was part of.
pipeline_step_name: The name of the pipeline model deployment step
that deployed the model.
model_name: Name of the deployed model.
model_uri: URI of the deployed model.
model_type: Type/format of the deployed model. Not used in this
MLflow case.
Returns:
One or more Service objects representing model servers that match
the input search criteria. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | find_model_server | safoinme/zenml | 0 | python | def find_model_server(self, running: bool=False, service_uuid: Optional[UUID]=None, pipeline_name: Optional[str]=None, pipeline_run_id: Optional[str]=None, pipeline_step_name: Optional[str]=None, model_name: Optional[str]=None, model_uri: Optional[str]=None, model_type: Optional[str]=None) -> List[BaseService]:
'Method to find one or more model servers that match the\n given criteria.\n\n Args:\n running: If true, only running services will be returned.\n service_uuid: The UUID of the service that was originally used\n to deploy the model.\n pipeline_name: Name of the pipeline that the deployed model was part\n of.\n pipeline_run_id: ID of the pipeline run which the deployed model\n was part of.\n pipeline_step_name: The name of the pipeline model deployment step\n that deployed the model.\n model_name: Name of the deployed model.\n model_uri: URI of the deployed model.\n model_type: Type/format of the deployed model. Not used in this\n MLflow case.\n\n Returns:\n One or more Service objects representing model servers that match\n the input search criteria.\n '
services = []
config = MLFlowDeploymentConfig(model_name=(model_name or ), model_uri=(model_uri or ), pipeline_name=(pipeline_name or ), pipeline_run_id=(pipeline_run_id or ), pipeline_step_name=(pipeline_step_name or ))
for (root, _, files) in os.walk(self.local_path):
if (service_uuid and (Path(root).name != str(service_uuid))):
continue
for file in files:
if (file == SERVICE_DAEMON_CONFIG_FILE_NAME):
service_config_path = os.path.join(root, file)
logger.debug('Loading service daemon configuration from %s', service_config_path)
existing_service_config = None
with open(service_config_path, 'r') as f:
existing_service_config = f.read()
existing_service = ServiceRegistry().load_service_from_json(existing_service_config)
if (not isinstance(existing_service, MLFlowDeploymentService)):
raise TypeError(f'Expected service type MLFlowDeploymentService but got {type(existing_service)} instead')
existing_service.update_status()
if self._matches_search_criteria(existing_service, config):
if ((not running) or existing_service.is_running):
services.append(cast(BaseService, existing_service))
return services | def find_model_server(self, running: bool=False, service_uuid: Optional[UUID]=None, pipeline_name: Optional[str]=None, pipeline_run_id: Optional[str]=None, pipeline_step_name: Optional[str]=None, model_name: Optional[str]=None, model_uri: Optional[str]=None, model_type: Optional[str]=None) -> List[BaseService]:
'Method to find one or more model servers that match the\n given criteria.\n\n Args:\n running: If true, only running services will be returned.\n service_uuid: The UUID of the service that was originally used\n to deploy the model.\n pipeline_name: Name of the pipeline that the deployed model was part\n of.\n pipeline_run_id: ID of the pipeline run which the deployed model\n was part of.\n pipeline_step_name: The name of the pipeline model deployment step\n that deployed the model.\n model_name: Name of the deployed model.\n model_uri: URI of the deployed model.\n model_type: Type/format of the deployed model. Not used in this\n MLflow case.\n\n Returns:\n One or more Service objects representing model servers that match\n the input search criteria.\n '
services = []
config = MLFlowDeploymentConfig(model_name=(model_name or ), model_uri=(model_uri or ), pipeline_name=(pipeline_name or ), pipeline_run_id=(pipeline_run_id or ), pipeline_step_name=(pipeline_step_name or ))
for (root, _, files) in os.walk(self.local_path):
if (service_uuid and (Path(root).name != str(service_uuid))):
continue
for file in files:
if (file == SERVICE_DAEMON_CONFIG_FILE_NAME):
service_config_path = os.path.join(root, file)
logger.debug('Loading service daemon configuration from %s', service_config_path)
existing_service_config = None
with open(service_config_path, 'r') as f:
existing_service_config = f.read()
existing_service = ServiceRegistry().load_service_from_json(existing_service_config)
if (not isinstance(existing_service, MLFlowDeploymentService)):
raise TypeError(f'Expected service type MLFlowDeploymentService but got {type(existing_service)} instead')
existing_service.update_status()
if self._matches_search_criteria(existing_service, config):
if ((not running) or existing_service.is_running):
services.append(cast(BaseService, existing_service))
return services<|docstring|>Method to find one or more model servers that match the
given criteria.
Args:
running: If true, only running services will be returned.
service_uuid: The UUID of the service that was originally used
to deploy the model.
pipeline_name: Name of the pipeline that the deployed model was part
of.
pipeline_run_id: ID of the pipeline run which the deployed model
was part of.
pipeline_step_name: The name of the pipeline model deployment step
that deployed the model.
model_name: Name of the deployed model.
model_uri: URI of the deployed model.
model_type: Type/format of the deployed model. Not used in this
MLflow case.
Returns:
One or more Service objects representing model servers that match
the input search criteria.<|endoftext|> |
5ecfa48d6f5b2d8f7776f7b8192602adeee64b33ca3f35500b0f1cd9e290089d | def _matches_search_criteria(self, existing_service: MLFlowDeploymentService, config: MLFlowDeploymentConfig) -> bool:
'Returns true if a service matches the input criteria. If any of\n the values in the input criteria are None, they are ignored. This\n allows listing services just by common pipeline names or step names,\n etc.\n\n Args:\n existing_service: The materialized Service instance derived from\n the config of the older (existing) service\n config: The MLFlowDeploymentConfig object passed to the\n deploy_model function holding parameters of the new service\n to be created.\n '
existing_service_config = existing_service.config
if (((not config.pipeline_name) or (existing_service_config.pipeline_name == config.pipeline_name)) and ((not config.model_name) or (existing_service_config.model_name == config.model_name)) and ((not config.pipeline_step_name) or (existing_service_config.pipeline_step_name == config.pipeline_step_name)) and ((not config.pipeline_run_id) or (existing_service_config.pipeline_run_id == config.pipeline_run_id))):
return True
return False | Returns true if a service matches the input criteria. If any of
the values in the input criteria are None, they are ignored. This
allows listing services just by common pipeline names or step names,
etc.
Args:
existing_service: The materialized Service instance derived from
the config of the older (existing) service
config: The MLFlowDeploymentConfig object passed to the
deploy_model function holding parameters of the new service
to be created. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | _matches_search_criteria | safoinme/zenml | 0 | python | def _matches_search_criteria(self, existing_service: MLFlowDeploymentService, config: MLFlowDeploymentConfig) -> bool:
'Returns true if a service matches the input criteria. If any of\n the values in the input criteria are None, they are ignored. This\n allows listing services just by common pipeline names or step names,\n etc.\n\n Args:\n existing_service: The materialized Service instance derived from\n the config of the older (existing) service\n config: The MLFlowDeploymentConfig object passed to the\n deploy_model function holding parameters of the new service\n to be created.\n '
existing_service_config = existing_service.config
if (((not config.pipeline_name) or (existing_service_config.pipeline_name == config.pipeline_name)) and ((not config.model_name) or (existing_service_config.model_name == config.model_name)) and ((not config.pipeline_step_name) or (existing_service_config.pipeline_step_name == config.pipeline_step_name)) and ((not config.pipeline_run_id) or (existing_service_config.pipeline_run_id == config.pipeline_run_id))):
return True
return False | def _matches_search_criteria(self, existing_service: MLFlowDeploymentService, config: MLFlowDeploymentConfig) -> bool:
'Returns true if a service matches the input criteria. If any of\n the values in the input criteria are None, they are ignored. This\n allows listing services just by common pipeline names or step names,\n etc.\n\n Args:\n existing_service: The materialized Service instance derived from\n the config of the older (existing) service\n config: The MLFlowDeploymentConfig object passed to the\n deploy_model function holding parameters of the new service\n to be created.\n '
existing_service_config = existing_service.config
if (((not config.pipeline_name) or (existing_service_config.pipeline_name == config.pipeline_name)) and ((not config.model_name) or (existing_service_config.model_name == config.model_name)) and ((not config.pipeline_step_name) or (existing_service_config.pipeline_step_name == config.pipeline_step_name)) and ((not config.pipeline_run_id) or (existing_service_config.pipeline_run_id == config.pipeline_run_id))):
return True
return False<|docstring|>Returns true if a service matches the input criteria. If any of
the values in the input criteria are None, they are ignored. This
allows listing services just by common pipeline names or step names,
etc.
Args:
existing_service: The materialized Service instance derived from
the config of the older (existing) service
config: The MLFlowDeploymentConfig object passed to the
deploy_model function holding parameters of the new service
to be created.<|endoftext|> |
de4c026fbc6f186a938a29e2d164b016df6d382d3a1bb8404e4e875c66e92ba1 | def stop_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT, force: bool=False) -> None:
'Method to stop a model server.\n\n Args:\n uuid: UUID of the model server to stop.\n timeout: Timeout in seconds to wait for the service to stop.\n force: If True, force the service to stop.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
existing_services[0].stop(timeout=timeout, force=force) | Method to stop a model server.
Args:
uuid: UUID of the model server to stop.
timeout: Timeout in seconds to wait for the service to stop.
force: If True, force the service to stop. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | stop_model_server | safoinme/zenml | 0 | python | def stop_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT, force: bool=False) -> None:
'Method to stop a model server.\n\n Args:\n uuid: UUID of the model server to stop.\n timeout: Timeout in seconds to wait for the service to stop.\n force: If True, force the service to stop.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
existing_services[0].stop(timeout=timeout, force=force) | def stop_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT, force: bool=False) -> None:
'Method to stop a model server.\n\n Args:\n uuid: UUID of the model server to stop.\n timeout: Timeout in seconds to wait for the service to stop.\n force: If True, force the service to stop.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
existing_services[0].stop(timeout=timeout, force=force)<|docstring|>Method to stop a model server.
Args:
uuid: UUID of the model server to stop.
timeout: Timeout in seconds to wait for the service to stop.
force: If True, force the service to stop.<|endoftext|> |
b34402124ededfc93df61cb7957bf491eb29da3c90f3b2fc8808a47f9a70fe80 | def start_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT) -> None:
'Method to start a model server.\n\n Args:\n uuid: UUID of the model server to start.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
existing_services[0].start(timeout=timeout) | Method to start a model server.
Args:
uuid: UUID of the model server to start. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | start_model_server | safoinme/zenml | 0 | python | def start_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT) -> None:
'Method to start a model server.\n\n Args:\n uuid: UUID of the model server to start.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
existing_services[0].start(timeout=timeout) | def start_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT) -> None:
'Method to start a model server.\n\n Args:\n uuid: UUID of the model server to start.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
existing_services[0].start(timeout=timeout)<|docstring|>Method to start a model server.
Args:
uuid: UUID of the model server to start.<|endoftext|> |
4c425c84733f2435390cb2f9ec2034912f6e81014d4c26040441ef2b2e5372fa | def delete_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT, force: bool=False) -> None:
'Method to delete all configuration of a model server.\n\n Args:\n uuid: UUID of the model server to delete.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
service = cast(MLFlowDeploymentService, existing_services[0])
self._clean_up_existing_service(existing_service=service, timeout=timeout, force=force) | Method to delete all configuration of a model server.
Args:
uuid: UUID of the model server to delete. | src/zenml/integrations/mlflow/model_deployers/mlflow_model_deployer.py | delete_model_server | safoinme/zenml | 0 | python | def delete_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT, force: bool=False) -> None:
'Method to delete all configuration of a model server.\n\n Args:\n uuid: UUID of the model server to delete.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
service = cast(MLFlowDeploymentService, existing_services[0])
self._clean_up_existing_service(existing_service=service, timeout=timeout, force=force) | def delete_model_server(self, uuid: UUID, timeout: int=DEFAULT_SERVICE_START_STOP_TIMEOUT, force: bool=False) -> None:
'Method to delete all configuration of a model server.\n\n Args:\n uuid: UUID of the model server to delete.\n '
existing_services = self.find_model_server(service_uuid=uuid)
if existing_services:
service = cast(MLFlowDeploymentService, existing_services[0])
self._clean_up_existing_service(existing_service=service, timeout=timeout, force=force)<|docstring|>Method to delete all configuration of a model server.
Args:
uuid: UUID of the model server to delete.<|endoftext|> |
1734c27f4ad2bd408a9138d13b777fb0a33fc10f313c2f1d4e9ea60e7f886e71 | def all_using_get4(self, **kwargs):
'Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.all_using_get4(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :return: list[Mapstringstring]\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.all_using_get4_with_http_info(**kwargs)
else:
data = self.all_using_get4_with_http_info(**kwargs)
return data | Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.all_using_get4(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Mapstringstring]
If the method is called asynchronously,
returns the request thread. | spinnaker_swagger_client/api/pubsub_subscription_controller_api.py | all_using_get4 | coveooss/spinnaker_python_client | 0 | python | def all_using_get4(self, **kwargs):
'Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.all_using_get4(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :return: list[Mapstringstring]\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.all_using_get4_with_http_info(**kwargs)
else:
data = self.all_using_get4_with_http_info(**kwargs)
return data | def all_using_get4(self, **kwargs):
'Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.all_using_get4(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :return: list[Mapstringstring]\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.all_using_get4_with_http_info(**kwargs)
else:
data = self.all_using_get4_with_http_info(**kwargs)
return data<|docstring|>Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.all_using_get4(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Mapstringstring]
If the method is called asynchronously,
returns the request thread.<|endoftext|> |
ca881e1d486117e2d9dd4ce1582cbdee37f7e418ee8567ab1eb15fb09d87d6fc | def all_using_get4_with_http_info(self, **kwargs):
'Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.all_using_get4_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :return: list[Mapstringstring]\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method all_using_get4" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['*/*'])
auth_settings = []
return self.api_client.call_api('/pubsub/subscriptions', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Mapstringstring]', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) | Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.all_using_get4_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Mapstringstring]
If the method is called asynchronously,
returns the request thread. | spinnaker_swagger_client/api/pubsub_subscription_controller_api.py | all_using_get4_with_http_info | coveooss/spinnaker_python_client | 0 | python | def all_using_get4_with_http_info(self, **kwargs):
'Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.all_using_get4_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :return: list[Mapstringstring]\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method all_using_get4" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['*/*'])
auth_settings = []
return self.api_client.call_api('/pubsub/subscriptions', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Mapstringstring]', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) | def all_using_get4_with_http_info(self, **kwargs):
'Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.all_using_get4_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :return: list[Mapstringstring]\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method all_using_get4" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['*/*'])
auth_settings = []
return self.api_client.call_api('/pubsub/subscriptions', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Mapstringstring]', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)<|docstring|>Retrieve the list of pub/sub subscriptions configured in Echo. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.all_using_get4_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Mapstringstring]
If the method is called asynchronously,
returns the request thread.<|endoftext|> |
cbd72ba2aafbdee6c34ad083c07067ab2b459f373f1e706ae59ddda797c182d2 | def set_dynamic_width_and_height(self, screen_geometry, width_ratio=0.5, height_ratio=0.5):
'\n Update width and height using an updated screen geometry.\n Use a ratio for the width and height of the dialog.\n '
screen_width = int((screen_geometry.width() * width_ratio))
screen_height = int((screen_geometry.height() * height_ratio))
self.resize(screen_width, screen_height)
x = int((screen_geometry.center().x() - (self.width() / 2)))
y = int((screen_geometry.center().y() - (self.height() / 2)))
self.move(x, y) | Update width and height using an updated screen geometry.
Use a ratio for the width and height of the dialog. | spyder/plugins/variableexplorer/widgets/basedialog.py | set_dynamic_width_and_height | mrclary/spyder | 3 | python | def set_dynamic_width_and_height(self, screen_geometry, width_ratio=0.5, height_ratio=0.5):
'\n Update width and height using an updated screen geometry.\n Use a ratio for the width and height of the dialog.\n '
screen_width = int((screen_geometry.width() * width_ratio))
screen_height = int((screen_geometry.height() * height_ratio))
self.resize(screen_width, screen_height)
x = int((screen_geometry.center().x() - (self.width() / 2)))
y = int((screen_geometry.center().y() - (self.height() / 2)))
self.move(x, y) | def set_dynamic_width_and_height(self, screen_geometry, width_ratio=0.5, height_ratio=0.5):
'\n Update width and height using an updated screen geometry.\n Use a ratio for the width and height of the dialog.\n '
screen_width = int((screen_geometry.width() * width_ratio))
screen_height = int((screen_geometry.height() * height_ratio))
self.resize(screen_width, screen_height)
x = int((screen_geometry.center().x() - (self.width() / 2)))
y = int((screen_geometry.center().y() - (self.height() / 2)))
self.move(x, y)<|docstring|>Update width and height using an updated screen geometry.
Use a ratio for the width and height of the dialog.<|endoftext|> |
4e6415b6a92922939048ccc143a87829af4789f02b8ea8fc433e351872da802e | def load(self):
"Load this table's data into Athena."
data_file_names = self._get_file_names()
districts = sorted(data_file_names.keys())
for district in districts:
district_file_name = data_file_names[district]
with NamedTemporaryFile('w+b') as raw_file:
with gzip.open(raw_file, 'wb') as gzip_file:
text_gzip_file = TextIOWrapper(gzip_file, encoding='utf-8')
self._convert_raw_file(district_file_name, text_gzip_file)
text_gzip_file.close()
self._athena.upload_data(self.name, raw_file, district=district)
is_partitioned = (None not in districts)
ddl = self._generate_ddl(is_partitioned)
self._athena.execute_query(ddl)
self.logger.debug('Ensured table exists for {0}'.format(self.name))
if is_partitioned:
self._athena.execute_query('MSCK REPAIR TABLE {0};'.format(self.name))
self.logger.debug('Repaired table for {0}'.format(self.name))
self.logger.info('Loaded normal table {0}'.format(self.name)) | Load this table's data into Athena. | ncd/normal_table.py | load | associatedpress/national-caseload-data-ingest | 9 | python | def load(self):
data_file_names = self._get_file_names()
districts = sorted(data_file_names.keys())
for district in districts:
district_file_name = data_file_names[district]
with NamedTemporaryFile('w+b') as raw_file:
with gzip.open(raw_file, 'wb') as gzip_file:
text_gzip_file = TextIOWrapper(gzip_file, encoding='utf-8')
self._convert_raw_file(district_file_name, text_gzip_file)
text_gzip_file.close()
self._athena.upload_data(self.name, raw_file, district=district)
is_partitioned = (None not in districts)
ddl = self._generate_ddl(is_partitioned)
self._athena.execute_query(ddl)
self.logger.debug('Ensured table exists for {0}'.format(self.name))
if is_partitioned:
self._athena.execute_query('MSCK REPAIR TABLE {0};'.format(self.name))
self.logger.debug('Repaired table for {0}'.format(self.name))
self.logger.info('Loaded normal table {0}'.format(self.name)) | def load(self):
data_file_names = self._get_file_names()
districts = sorted(data_file_names.keys())
for district in districts:
district_file_name = data_file_names[district]
with NamedTemporaryFile('w+b') as raw_file:
with gzip.open(raw_file, 'wb') as gzip_file:
text_gzip_file = TextIOWrapper(gzip_file, encoding='utf-8')
self._convert_raw_file(district_file_name, text_gzip_file)
text_gzip_file.close()
self._athena.upload_data(self.name, raw_file, district=district)
is_partitioned = (None not in districts)
ddl = self._generate_ddl(is_partitioned)
self._athena.execute_query(ddl)
self.logger.debug('Ensured table exists for {0}'.format(self.name))
if is_partitioned:
self._athena.execute_query('MSCK REPAIR TABLE {0};'.format(self.name))
self.logger.debug('Repaired table for {0}'.format(self.name))
self.logger.info('Loaded normal table {0}'.format(self.name))<|docstring|>Load this table's data into Athena.<|endoftext|> |
b770cf36c86006f34679e3162a41fac464e0907ec6cbd56512260f363285192b | def _convert_raw_file(self, raw_path, gzip_file):
'Convert a raw data file for Athena and add it to a .gz.\n\n Args:\n raw_path: A string path to a file stored in self._zip.\n gzip_file: A file-like object to which our newly converted data\n should be appended.\n '
self.logger.debug('Beginning conversion of {0}'.format(raw_path))
with self._zip.open(raw_path) as raw_data:
without_carriage_returns = self._remove_crs(raw_data)
csv_data = self._make_csv(without_carriage_returns)
self._generate_rows(csv_data, gzip_file)
self.logger.debug('Completed conversion of {0}'.format(raw_path)) | Convert a raw data file for Athena and add it to a .gz.
Args:
raw_path: A string path to a file stored in self._zip.
gzip_file: A file-like object to which our newly converted data
should be appended. | ncd/normal_table.py | _convert_raw_file | associatedpress/national-caseload-data-ingest | 9 | python | def _convert_raw_file(self, raw_path, gzip_file):
'Convert a raw data file for Athena and add it to a .gz.\n\n Args:\n raw_path: A string path to a file stored in self._zip.\n gzip_file: A file-like object to which our newly converted data\n should be appended.\n '
self.logger.debug('Beginning conversion of {0}'.format(raw_path))
with self._zip.open(raw_path) as raw_data:
without_carriage_returns = self._remove_crs(raw_data)
csv_data = self._make_csv(without_carriage_returns)
self._generate_rows(csv_data, gzip_file)
self.logger.debug('Completed conversion of {0}'.format(raw_path)) | def _convert_raw_file(self, raw_path, gzip_file):
'Convert a raw data file for Athena and add it to a .gz.\n\n Args:\n raw_path: A string path to a file stored in self._zip.\n gzip_file: A file-like object to which our newly converted data\n should be appended.\n '
self.logger.debug('Beginning conversion of {0}'.format(raw_path))
with self._zip.open(raw_path) as raw_data:
without_carriage_returns = self._remove_crs(raw_data)
csv_data = self._make_csv(without_carriage_returns)
self._generate_rows(csv_data, gzip_file)
self.logger.debug('Completed conversion of {0}'.format(raw_path))<|docstring|>Convert a raw data file for Athena and add it to a .gz.
Args:
raw_path: A string path to a file stored in self._zip.
gzip_file: A file-like object to which our newly converted data
should be appended.<|endoftext|> |
4982ca8bba4e9f6277a4c31d24631e984b147ee00472d0663f34c049189e9e01 | def _gather_python_types(self):
'Determine which Python data type each field should have.\n\n Returns:\n A dict with field names as keys and functions as values.\n '
self._schema.seek(0)
schema_reader = DictReader(self._schema)
def _parse_oracle_date(raw_text):
return datetime.datetime.strptime(raw_text, '%d-%b-%Y').strftime('%Y-%m-%d').rjust(10, '0')
def converter_with_nulls(converter):
def convert(raw_text):
try:
return converter(raw_text)
except ValueError:
return None
return convert
def _gather_python_types(self):
    """Determine which Python data type each field should have.

    Returns:
        A dict with field names as keys and functions as values.
    """
    self._schema.seek(0)
    schema_reader = DictReader(self._schema)

    def _parse_oracle_date(raw_text):
        # Oracle dumps dates as e.g. 02-JAN-2001; normalize to ISO and
        # left-pad with zeros to a fixed 10-character width.
        parsed = datetime.datetime.strptime(raw_text, '%d-%b-%Y')
        return parsed.strftime('%Y-%m-%d').rjust(10, '0')

    def converter_with_nulls(converter):
        # Wrap a converter so unparseable values become None.
        def convert(raw_text):
            try:
                return converter(raw_text)
            except ValueError:
                return None
        return convert

    def get_python_type(field_type_text):
        field_components = re.match(
            '(?P<type>[^(]+)(?:\\((?P<args>.+)\\))?', field_type_text)
        field_type_component = field_components.group('type')
        converters = {
            'VARCHAR': str,
            'VARCHAR2': str,
            'NUMBER': int,
            'DATE': _parse_oracle_date,
            'FLOAT': float,
        }
        if field_type_component in converters:
            return converter_with_nulls(converters[field_type_component])
        raise NotImplementedError(
            'Unsure how to handle a {0}'.format(field_type_text))

    return {row['column']: get_python_type(row['field_type'])
            for row in schema_reader}
def _generate_ddl(self, is_partitioned=False):
    """Generate a CREATE EXTERNAL TABLE query to run on Athena.

    Args:
        is_partitioned: A boolean specifying whether a table is to be split
            into multiple files by federal judicial district (True) or
            consists of only one file covering all districts (False).

    Returns:
        A string SQL query to execute.
    """
    self._schema.seek(0)
    reader = DictReader(self._schema)

    # Oracle schema type -> Athena column type.
    athena_types = {
        'VARCHAR': 'STRING',
        'VARCHAR2': 'STRING',
        'NUMBER': 'BIGINT',
        'DATE': 'DATE',
        'FLOAT': 'DOUBLE',
    }

    def get_athena_type(field_type_text):
        field_components = re.match(
            '(?P<type>[^(]+)(?:\\((?P<args>.+)\\))?', field_type_text)
        field_type_component = field_components.group('type')
        try:
            return athena_types[field_type_component]
        except KeyError:
            raise NotImplementedError(
                'Unsure how to handle a {0}'.format(field_type_text))

    # One data column plus one redaction-flag column per schema field.
    data_columns = []
    redaction_columns = []
    for row in reader:
        data_columns.append('{0} {1}'.format(
            row['column'], get_athena_type(row['field_type'])))
        redaction_columns.append('redacted_{0} BOOLEAN'.format(row['column']))
    column_specs = ',\n        '.join(data_columns + redaction_columns)

    if is_partitioned:
        partition_clause = '\n    PARTITIONED BY (filename_district STRING)'
    else:
        partition_clause = ''

    query = """
    CREATE EXTERNAL TABLE IF NOT EXISTS {name} (
        {columns}
    ){partition_clause}
    ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
    STORED AS TEXTFILE
    LOCATION 's3://{bucket}/{table_prefix}';
    """.format(
        name=self.name,
        columns=column_specs,
        partition_clause=partition_clause,
        bucket=self._athena.data_bucket,
        table_prefix=self._athena.prefix_for_table(self.name))
    return dedent(query)
def _generate_rows(self, csv_data, gzip_file):
    """Convert rows of a CSV and append the results to a .gz.

    Args:
        csv_data: A text file-like object containing CSV data.
        gzip_file: A file-like object to which our newly converted data
            should be appended.
    """
    field_converters = self._gather_python_types()
    for input_row in DictReader(csv_data):
        output_obj = {}
        for field_name, field_raw_value in input_row.items():
            # A bare asterisk marks a redacted value in the source data.
            is_redacted = field_raw_value == '*'
            if is_redacted:
                field_value = None
            else:
                field_value = field_converters[field_name](field_raw_value)
            output_obj[field_name] = field_value
            output_obj['redacted_{0}'.format(field_name)] = is_redacted
        # One JSON object per line, as Athena's JsonSerDe expects.
        gzip_file.write('{0}\n'.format(json.dumps(output_obj)))
def _get_file_names(self):
    """Determine which contents to use from our zip file.

    Returns:
        A dict. Each key specifies the federal judicial district covered by
        a given data file; this is a string unless the file covers all
        districts, in which case it is None. Each value is a string filename
        for the given data file within self._zip.
    """
    # e.g. "tape.txt" (all districts) or "tape_ABC.txt" (district ABC).
    file_name_pattern = re.compile(
        '^{0}(?:_(?P<district>[A-Z]+))?\\.txt$'.format(self.name.lower()))

    data_file_names = {}
    for file_name in self._zip.namelist():
        match = file_name_pattern.match(file_name)
        if match:
            data_file_names[match.group('district')] = file_name
    return data_file_names
def _make_csv(self, fixed_width_data):
    """Convert a fixed-width data file to a CSV.

    Args:
        fixed_width_data: A text file-like object containing fixed-width
            data, following the format described in self._schema.

    Returns:
        A text file-like object containing CSV data.
    """
    self._schema.seek(0)
    fixed_width_data.seek(0)
    # Decode the raw bytes as Latin-1 so fixed2csv sees text.
    text_stream = TextIOWrapper(fixed_width_data, encoding='latin-1')
    converted = TemporaryFile(mode='w+')
    fixed2csv(text_stream, self._schema, output=converted)
    text_stream.close()
    # Rewind so callers can read the converted output from the start.
    converted.seek(0)
    self.logger.debug('Converted fixed-width data to CSV')
    return converted
def _remove_crs(self, raw_data):
    """Remove carriage returns from a file.

    Args:
        raw_data: A file-like object.

    Returns:
        A file-like object with most of the same content.
    """
    no_cr_file = TemporaryFile(mode='w+b')
    # Each CR is replaced with a space rather than deleted, so the data's
    # overall length is unchanged.
    for raw_chunk in iter(lambda: raw_data.read(4096), b''):
        no_cr_file.write(raw_chunk.replace(b'\r', b' '))
    no_cr_file.seek(0)
    raw_data.close()
    self.logger.debug('Removed carriage returns')
    return no_cr_file
def start_router(router_class, router_name):
    """Wrapper for starting a router and register it.

    Args:
        router_class: The router class to instantiate.
        router_name: The name to give to the router.

    Returns:
        A handle to newly started router actor.
    """
    router_handle = router_class.remote(router_name)
    # Register the actor under its name, then kick off its event loop.
    ray.experimental.register_actor(router_name, router_handle)
    router_handle.start.remote()
    return router_handle
def package_freeswitch_config():
    """Packages our freeswitch config files and drops them in /etc."""
    run('mkdir -p ~/endaga-packages')
    conf_path = '~/client/conf/freeswitch-conf-endaga'
    print('packaging %s' % conf_path)
    with cd(conf_path):
        # Build the package with fpm, versioned from the VERSION file.
        run('fpm -s dir -t %s -a all -n freeswitch-conf-endaga'
            ' -v `cat VERSION`'
            ' --description "Endaga Freeswitch config files"'
            ' freeswitch=/etc' % env.pkgfmt)
        run('mv *.%s ~/endaga-packages' % env.pkgfmt)
def package_endaga_lang_config():
    """Packages our translation files."""
    # Extract and compile the translation catalogs before packaging.
    extract_pot()
    compile_lang()
    run('mkdir -p ~/endaga-packages')
    lang_path = '~/client/endaga-lang'
    print('packaging %s' % lang_path)
    with cd(lang_path):
        # Build the package with fpm, versioned from the VERSION file.
        run('fpm -s dir -t %s -a all -n endaga-lang'
            ' -v `cat VERSION`'
            ' --description "Endaga translation files"'
            ' locale=/usr/share' % env.pkgfmt)
        run('mv *.%s ~/endaga-packages' % env.pkgfmt)
def remove_invalid_options(context, search_options, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""
    # Admin contexts may filter on anything; leave the options untouched.
    if context.is_admin:
        return
    unrecognized = []
    for opt in search_options:
        if opt not in allowed_search_options:
            unrecognized.append(opt)
    LOG.debug("Removing options '%s' from query", ', '.join(unrecognized))
    for opt in unrecognized:
        search_options.pop(opt, None)
@extensions.expected_errors((400, 403))
def index(self, req):
    """Returns a list of server names and ids for a given user."""
    try:
        return self._get_servers(req, is_detail=False)
    except exception.Invalid as err:
        # Bad search options become a 400 for the API caller.
        raise exc.HTTPBadRequest(explanation=err.format_message())
@extensions.expected_errors((400, 403))
def detail(self, req):
    """Returns a list of server details for a given user."""
    try:
        return self._get_servers(req, is_detail=True)
    except exception.Invalid as err:
        # Bad search options become a 400 for the API caller.
        raise exc.HTTPBadRequest(explanation=err.format_message())
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""
    search_opts = {}
    search_opts.update(req.GET)
    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Translate human-readable 'status' values into vm_state/task_state
    # filters; an unmatchable status means an empty result set.
    search_opts.pop('status', None)
    if 'status' in req.GET.keys():
        statuses = req.GET.getall('status')
        vm_state, task_state = common.task_and_vm_state_from_status(statuses)
        if not vm_state and not task_state:
            return {'servers': []}
        search_opts['vm_state'] = vm_state
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes-since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes-since'])
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes-since'] = parsed

    # Without 'changes-since' the caller only wants non-deleted servers
    # unless they asked otherwise.
    if 'deleted' not in search_opts:
        if 'changes-since' not in search_opts:
            search_opts['deleted'] = False

    if search_opts.get('vm_state') == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _('Only administrators may list deleted instances')
            raise exc.HTTPForbidden(explanation=msg)

    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            # A falsy string value means the caller did not really want
            # the all-tenants view.
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(six.text_type(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    elif context.project_id:
        search_opts['project_id'] = context.project_id
    else:
        search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    sort_keys, sort_dirs = common.get_sort_params(req.params)
    try:
        instance_list = self.compute_api.get_all(
            context, search_opts=search_opts, limit=limit, marker=marker,
            want_objects=True, expected_attrs=['pci_devices'],
            sort_keys=sort_keys, sort_dirs=sort_dirs)
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        # An unknown flavor filter simply yields no results.
        LOG.debug("Flavor '%s' could not be found ", search_opts['flavor'])
        instance_list = objects.InstanceList()

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)
    req.cache_db_instances(instance_list)
    return response
f6797ca9eef5f2c719b17caa12c62d82e0c44f863f9f8886338f9b1150233a52 | def _get_server(self, context, req, instance_uuid):
'Utility function for looking up an instance by uuid.'
instance = common.get_instance(self.compute_api, context, instance_uuid, want_objects=True, expected_attrs=['pci_devices', 'flavor'])
req.cache_db_instance(instance)
return instance | Utility function for looking up an instance by uuid. | nova/api/openstack/compute/plugins/v3/servers.py | _get_server | orbitfp7/nova | 5 | python | def _get_server(self, context, req, instance_uuid):
instance = common.get_instance(self.compute_api, context, instance_uuid, want_objects=True, expected_attrs=['pci_devices', 'flavor'])
req.cache_db_instance(instance)
return instance | def _get_server(self, context, req, instance_uuid):
instance = common.get_instance(self.compute_api, context, instance_uuid, want_objects=True, expected_attrs=['pci_devices', 'flavor'])
req.cache_db_instance(instance)
return instance<|docstring|>Utility function for looking up an instance by uuid.<|endoftext|> |
0c4e90f67f18e4a2ae9d8326ff5e0ee7bf2f82276954d4da1926b624a0d3a6ae | def _get_requested_networks(self, requested_networks):
'Create a list of requested networks from the networks attribute.'
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
if request.port_id:
request.network_id = None
if (not utils.is_neutron()):
msg = _('Unknown argument: port')
raise exc.HTTPBadRequest(explanation=msg)
if (request.address is not None):
msg = (_("Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port already has a Fixed IP allocated.") % {'addr': request.address, 'port': request.port_id})
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if ((not request.port_id) and (not uuidutils.is_uuid_like(request.network_id))):
br_uuid = request.network_id.split('-', 1)[(- 1)]
if (not uuidutils.is_uuid_like(br_uuid)):
msg = (_('Bad networks format: network uuid is not in proper format (%s)') % request.network_id)
raise exc.HTTPBadRequest(explanation=msg)
if ((not utils.is_neutron()) and request.network_id and (request.network_id in network_uuids)):
expl = (_('Duplicate networks (%s) are not allowed') % request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = (_('Bad network format: missing %s') % key)
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks) | Create a list of requested networks from the networks attribute. | nova/api/openstack/compute/plugins/v3/servers.py | _get_requested_networks | orbitfp7/nova | 5 | python | def _get_requested_networks(self, requested_networks):
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
if request.port_id:
request.network_id = None
if (not utils.is_neutron()):
msg = _('Unknown argument: port')
raise exc.HTTPBadRequest(explanation=msg)
if (request.address is not None):
msg = (_("Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port already has a Fixed IP allocated.") % {'addr': request.address, 'port': request.port_id})
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if ((not request.port_id) and (not uuidutils.is_uuid_like(request.network_id))):
br_uuid = request.network_id.split('-', 1)[(- 1)]
if (not uuidutils.is_uuid_like(br_uuid)):
msg = (_('Bad networks format: network uuid is not in proper format (%s)') % request.network_id)
raise exc.HTTPBadRequest(explanation=msg)
if ((not utils.is_neutron()) and request.network_id and (request.network_id in network_uuids)):
expl = (_('Duplicate networks (%s) are not allowed') % request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = (_('Bad network format: missing %s') % key)
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks) | def _get_requested_networks(self, requested_networks):
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
if request.port_id:
request.network_id = None
if (not utils.is_neutron()):
msg = _('Unknown argument: port')
raise exc.HTTPBadRequest(explanation=msg)
if (request.address is not None):
msg = (_("Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port already has a Fixed IP allocated.") % {'addr': request.address, 'port': request.port_id})
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if ((not request.port_id) and (not uuidutils.is_uuid_like(request.network_id))):
br_uuid = request.network_id.split('-', 1)[(- 1)]
if (not uuidutils.is_uuid_like(br_uuid)):
msg = (_('Bad networks format: network uuid is not in proper format (%s)') % request.network_id)
raise exc.HTTPBadRequest(explanation=msg)
if ((not utils.is_neutron()) and request.network_id and (request.network_id in network_uuids)):
expl = (_('Duplicate networks (%s) are not allowed') % request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = (_('Bad network format: missing %s') % key)
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)<|docstring|>Create a list of requested networks from the networks attribute.<|endoftext|> |
e19fcd6c9d19a440ea00cefce5dc734f7111f12ed5a75d34ad8f2c19af610021 | @extensions.expected_errors(404)
def show(self, req, id):
'Returns server details by server id.'
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id, want_objects=True, expected_attrs=['pci_devices', 'flavor'])
req.cache_db_instance(instance)
return self._view_builder.show(req, instance) | Returns server details by server id. | nova/api/openstack/compute/plugins/v3/servers.py | show | orbitfp7/nova | 5 | python | @extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id, want_objects=True, expected_attrs=['pci_devices', 'flavor'])
req.cache_db_instance(instance)
return self._view_builder.show(req, instance) | @extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id, want_objects=True, expected_attrs=['pci_devices', 'flavor'])
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)<|docstring|>Returns server details by server id.<|endoftext|> |
5385e05069734186e4bd0343bcb19a69d8bdff3e7ba1acd8afd47add85770f3f | @wsgi.response(202)
@extensions.expected_errors((400, 403, 409, 413))
@validation.schema(schema_server_create)
def create(self, req, body):
'Creates a new server for a given user.'
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = server_dict['name']
create_kwargs = {}
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point, server_dict, create_kwargs, body)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
return_reservation_id = create_kwargs.pop('return_reservation_id', False)
requested_networks = None
if (('os-networks' in self.extension_info.get_extensions()) or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if (requested_networks is not None):
requested_networks = self._get_requested_networks(requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _('Invalid flavorRef provided.')
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(flavor_id, ctxt=context, read_deleted='no')
(instances, resv_id) = self.compute_api.create(context, inst_type, image_uuid, display_name=name, display_description=name, metadata=server_dict.get('metadata', {}), admin_password=password, requested_networks=requested_networks, check_server_group_quota=True, **create_kwargs)
except (exception.QuotaError, exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(explanation=error.format_message(), headers={'Retry-After': 0})
except exception.ImageNotFound:
msg = _('Can not find requested image')
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _('Invalid flavorRef provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _('Invalid key_name provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _('Invalid config_drive provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = ('%(err_type)s: %(err_msg)s' % {'err_type': err.exc_type, 'err_msg': err.value})
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = ('UnicodeError: %s' % error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.InvalidRequest, exception.InvalidVolume, exception.MultiplePortsNotApplicable, exception.InvalidFixedIpAndMaxCountRequest, exception.InstanceUserDataMalformed, exception.InstanceUserDataTooLarge, exception.PortNotFound, exception.FixedIpAlreadyInUse, exception.SecurityGroupNotFound, exception.PortRequiresFixedIP, exception.NetworkRequiresSubnet, exception.NetworkNotFound, exception.InvalidBDMVolumeNotBootable, exception.InvalidBDMSnapshot, exception.InvalidBDMVolume, exception.InvalidBDMImage, exception.InvalidBDMBootSequence, exception.InvalidBDMLocalsLimit, exception.InvalidBDMVolumeNotBootable, exception.AutoDiskConfigDisabledByImage, exception.ImageNUMATopologyIncomplete, exception.ImageNUMATopologyForbidden, exception.ImageNUMATopologyAsymmetric, exception.ImageNUMATopologyCPUOutOfRange, exception.ImageNUMATopologyCPUDuplicates, exception.ImageNUMATopologyCPUsUnassigned, exception.ImageNUMATopologyMemoryOutOfRange) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse, exception.InstanceExists, exception.NetworkAmbiguous, exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
if return_reservation_id:
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj) | Creates a new server for a given user. | nova/api/openstack/compute/plugins/v3/servers.py | create | orbitfp7/nova | 5 | python | @wsgi.response(202)
@extensions.expected_errors((400, 403, 409, 413))
@validation.schema(schema_server_create)
def create(self, req, body):
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = server_dict['name']
create_kwargs = {}
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point, server_dict, create_kwargs, body)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
return_reservation_id = create_kwargs.pop('return_reservation_id', False)
requested_networks = None
if (('os-networks' in self.extension_info.get_extensions()) or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if (requested_networks is not None):
requested_networks = self._get_requested_networks(requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _('Invalid flavorRef provided.')
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(flavor_id, ctxt=context, read_deleted='no')
(instances, resv_id) = self.compute_api.create(context, inst_type, image_uuid, display_name=name, display_description=name, metadata=server_dict.get('metadata', {}), admin_password=password, requested_networks=requested_networks, check_server_group_quota=True, **create_kwargs)
except (exception.QuotaError, exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(explanation=error.format_message(), headers={'Retry-After': 0})
except exception.ImageNotFound:
msg = _('Can not find requested image')
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _('Invalid flavorRef provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _('Invalid key_name provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _('Invalid config_drive provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = ('%(err_type)s: %(err_msg)s' % {'err_type': err.exc_type, 'err_msg': err.value})
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = ('UnicodeError: %s' % error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.InvalidRequest, exception.InvalidVolume, exception.MultiplePortsNotApplicable, exception.InvalidFixedIpAndMaxCountRequest, exception.InstanceUserDataMalformed, exception.InstanceUserDataTooLarge, exception.PortNotFound, exception.FixedIpAlreadyInUse, exception.SecurityGroupNotFound, exception.PortRequiresFixedIP, exception.NetworkRequiresSubnet, exception.NetworkNotFound, exception.InvalidBDMVolumeNotBootable, exception.InvalidBDMSnapshot, exception.InvalidBDMVolume, exception.InvalidBDMImage, exception.InvalidBDMBootSequence, exception.InvalidBDMLocalsLimit, exception.InvalidBDMVolumeNotBootable, exception.AutoDiskConfigDisabledByImage, exception.ImageNUMATopologyIncomplete, exception.ImageNUMATopologyForbidden, exception.ImageNUMATopologyAsymmetric, exception.ImageNUMATopologyCPUOutOfRange, exception.ImageNUMATopologyCPUDuplicates, exception.ImageNUMATopologyCPUsUnassigned, exception.ImageNUMATopologyMemoryOutOfRange) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse, exception.InstanceExists, exception.NetworkAmbiguous, exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
if return_reservation_id:
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj) | @wsgi.response(202)
@extensions.expected_errors((400, 403, 409, 413))
@validation.schema(schema_server_create)
def create(self, req, body):
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = server_dict['name']
create_kwargs = {}
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point, server_dict, create_kwargs, body)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
return_reservation_id = create_kwargs.pop('return_reservation_id', False)
requested_networks = None
if (('os-networks' in self.extension_info.get_extensions()) or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if (requested_networks is not None):
requested_networks = self._get_requested_networks(requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _('Invalid flavorRef provided.')
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(flavor_id, ctxt=context, read_deleted='no')
(instances, resv_id) = self.compute_api.create(context, inst_type, image_uuid, display_name=name, display_description=name, metadata=server_dict.get('metadata', {}), admin_password=password, requested_networks=requested_networks, check_server_group_quota=True, **create_kwargs)
except (exception.QuotaError, exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(explanation=error.format_message(), headers={'Retry-After': 0})
except exception.ImageNotFound:
msg = _('Can not find requested image')
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _('Invalid flavorRef provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _('Invalid key_name provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _('Invalid config_drive provided.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = ('%(err_type)s: %(err_msg)s' % {'err_type': err.exc_type, 'err_msg': err.value})
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = ('UnicodeError: %s' % error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.InvalidRequest, exception.InvalidVolume, exception.MultiplePortsNotApplicable, exception.InvalidFixedIpAndMaxCountRequest, exception.InstanceUserDataMalformed, exception.InstanceUserDataTooLarge, exception.PortNotFound, exception.FixedIpAlreadyInUse, exception.SecurityGroupNotFound, exception.PortRequiresFixedIP, exception.NetworkRequiresSubnet, exception.NetworkNotFound, exception.InvalidBDMVolumeNotBootable, exception.InvalidBDMSnapshot, exception.InvalidBDMVolume, exception.InvalidBDMImage, exception.InvalidBDMBootSequence, exception.InvalidBDMLocalsLimit, exception.InvalidBDMVolumeNotBootable, exception.AutoDiskConfigDisabledByImage, exception.ImageNUMATopologyIncomplete, exception.ImageNUMATopologyForbidden, exception.ImageNUMATopologyAsymmetric, exception.ImageNUMATopologyCPUOutOfRange, exception.ImageNUMATopologyCPUDuplicates, exception.ImageNUMATopologyCPUsUnassigned, exception.ImageNUMATopologyMemoryOutOfRange) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse, exception.InstanceExists, exception.NetworkAmbiguous, exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
if return_reservation_id:
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)<|docstring|>Creates a new server for a given user.<|endoftext|> |
e2b69fa6d45e4cd84541b84ac4d30271c2cfa97c75257d2f142ab30d0d136fd8 | @extensions.expected_errors((400, 404))
@validation.schema(schema_server_update)
def update(self, req, id, body):
'Update server then pass on to version-specific controller.'
ctxt = req.environ['nova.context']
update_dict = {}
if ('name' in body['server']):
update_dict['display_name'] = body['server']['name']
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point, body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id, want_objects=True, expected_attrs=['pci_devices'])
try:
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg) | Update server then pass on to version-specific controller. | nova/api/openstack/compute/plugins/v3/servers.py | update | orbitfp7/nova | 5 | python | @extensions.expected_errors((400, 404))
@validation.schema(schema_server_update)
def update(self, req, id, body):
ctxt = req.environ['nova.context']
update_dict = {}
if ('name' in body['server']):
update_dict['display_name'] = body['server']['name']
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point, body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id, want_objects=True, expected_attrs=['pci_devices'])
try:
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg) | @extensions.expected_errors((400, 404))
@validation.schema(schema_server_update)
def update(self, req, id, body):
ctxt = req.environ['nova.context']
update_dict = {}
if ('name' in body['server']):
update_dict['display_name'] = body['server']['name']
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point, body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id, want_objects=True, expected_attrs=['pci_devices'])
try:
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)<|docstring|>Update server then pass on to version-specific controller.<|endoftext|> |
b6b087cdc1d240d31edaf591c8a02528052051212c41002e901c3a8559b694a1 | def _resize(self, req, instance_id, flavor_id, **kwargs):
'Begin the resize process with given instance/flavor.'
context = req.environ['nova.context']
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message(), headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _('Unable to locate requested flavor.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _('Resize requires a flavor change.')
raise exc.HTTPBadRequest(explanation=msg)
except (exception.CannotResizeDisk, exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _('You are not authorized to access the image the instance was started with.')
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _('Image that the instance was started with could not be found.')
raise exc.HTTPBadRequest(explanation=msg)
except (exception.NoValidHost, exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _('Invalid instance image.')
raise exc.HTTPBadRequest(explanation=msg) | Begin the resize process with given instance/flavor. | nova/api/openstack/compute/plugins/v3/servers.py | _resize | orbitfp7/nova | 5 | python | def _resize(self, req, instance_id, flavor_id, **kwargs):
context = req.environ['nova.context']
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message(), headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _('Unable to locate requested flavor.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _('Resize requires a flavor change.')
raise exc.HTTPBadRequest(explanation=msg)
except (exception.CannotResizeDisk, exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _('You are not authorized to access the image the instance was started with.')
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _('Image that the instance was started with could not be found.')
raise exc.HTTPBadRequest(explanation=msg)
except (exception.NoValidHost, exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _('Invalid instance image.')
raise exc.HTTPBadRequest(explanation=msg) | def _resize(self, req, instance_id, flavor_id, **kwargs):
context = req.environ['nova.context']
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message(), headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _('Unable to locate requested flavor.')
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _('Resize requires a flavor change.')
raise exc.HTTPBadRequest(explanation=msg)
except (exception.CannotResizeDisk, exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _('You are not authorized to access the image the instance was started with.')
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _('Image that the instance was started with could not be found.')
raise exc.HTTPBadRequest(explanation=msg)
except (exception.NoValidHost, exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _('Invalid instance image.')
raise exc.HTTPBadRequest(explanation=msg)<|docstring|>Begin the resize process with given instance/flavor.<|endoftext|> |
20b00a90f71035346d4d40310b4e1d85acb41584f8cd0a3bea427632a05c3f3b | @wsgi.response(204)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
'Destroys a server.'
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'delete', id) | Destroys a server. | nova/api/openstack/compute/plugins/v3/servers.py | delete | orbitfp7/nova | 5 | python | @wsgi.response(204)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'delete', id) | @wsgi.response(204)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'delete', id)<|docstring|>Destroys a server.<|endoftext|> |
15f4197a00011735f96d1766a8b20cfea0f0cddd65f0f7bbf95bd6504a3b702a | def _image_from_req_data(self, server_dict, create_kwargs):
'Get image data from the request or raise appropriate\n exceptions.\n\n The field imageRef is mandatory when no block devices have been\n defined and must be a proper uuid when present.\n '
image_href = server_dict.get('imageRef')
if ((not image_href) and create_kwargs.get('block_device_mapping')):
return ''
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _('Missing imageRef attribute')
raise exc.HTTPBadRequest(explanation=msg) | Get image data from the request or raise appropriate
exceptions.
The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present. | nova/api/openstack/compute/plugins/v3/servers.py | _image_from_req_data | orbitfp7/nova | 5 | python | def _image_from_req_data(self, server_dict, create_kwargs):
'Get image data from the request or raise appropriate\n exceptions.\n\n The field imageRef is mandatory when no block devices have been\n defined and must be a proper uuid when present.\n '
image_href = server_dict.get('imageRef')
if ((not image_href) and create_kwargs.get('block_device_mapping')):
return
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _('Missing imageRef attribute')
raise exc.HTTPBadRequest(explanation=msg) | def _image_from_req_data(self, server_dict, create_kwargs):
'Get image data from the request or raise appropriate\n exceptions.\n\n The field imageRef is mandatory when no block devices have been\n defined and must be a proper uuid when present.\n '
image_href = server_dict.get('imageRef')
if ((not image_href) and create_kwargs.get('block_device_mapping')):
return
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _('Missing imageRef attribute')
raise exc.HTTPBadRequest(explanation=msg)<|docstring|>Get image data from the request or raise appropriate
exceptions.
The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present.<|endoftext|> |
a90ce778204aeb68115ade3e96b905d59a5ad411bfcbcabe9f51d23dcb5e8c9a | @wsgi.response(202)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.action('resize')
@validation.schema(schema_server_resize)
def _action_resize(self, req, id, body):
'Resizes a given instance to the flavor size requested.'
resize_dict = body['resize']
flavor_ref = str(resize_dict['flavorRef'])
resize_kwargs = {}
if list(self.resize_extension_manager):
self.resize_extension_manager.map(self._resize_extension_point, resize_dict, resize_kwargs)
self._resize(req, id, flavor_ref, **resize_kwargs) | Resizes a given instance to the flavor size requested. | nova/api/openstack/compute/plugins/v3/servers.py | _action_resize | orbitfp7/nova | 5 | python | @wsgi.response(202)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.action('resize')
@validation.schema(schema_server_resize)
def _action_resize(self, req, id, body):
resize_dict = body['resize']
flavor_ref = str(resize_dict['flavorRef'])
resize_kwargs = {}
if list(self.resize_extension_manager):
self.resize_extension_manager.map(self._resize_extension_point, resize_dict, resize_kwargs)
self._resize(req, id, flavor_ref, **resize_kwargs) | @wsgi.response(202)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.action('resize')
@validation.schema(schema_server_resize)
def _action_resize(self, req, id, body):
resize_dict = body['resize']
flavor_ref = str(resize_dict['flavorRef'])
resize_kwargs = {}
if list(self.resize_extension_manager):
self.resize_extension_manager.map(self._resize_extension_point, resize_dict, resize_kwargs)
self._resize(req, id, flavor_ref, **resize_kwargs)<|docstring|>Resizes a given instance to the flavor size requested.<|endoftext|> |
267003561968a8070b79769c80e01e16700ea6b587f4f166717e19f8e062e2a0 | @wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild)
def _action_rebuild(self, req, id, body):
'Rebuild an instance with the given attributes.'
rebuild_dict = body['rebuild']
image_href = rebuild_dict['imageRef']
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {'name': 'display_name', 'metadata': 'metadata'}
rebuild_kwargs = {}
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point, rebuild_dict, rebuild_kwargs)
for (request_attribute, instance_attribute) in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context, instance, image_href, password, **rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'rebuild', id)
except exception.InstanceNotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)
except exception.ImageNotFound:
msg = _('Cannot find image for rebuild')
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj) | Rebuild an instance with the given attributes. | nova/api/openstack/compute/plugins/v3/servers.py | _action_rebuild | orbitfp7/nova | 5 | python | @wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild)
def _action_rebuild(self, req, id, body):
rebuild_dict = body['rebuild']
image_href = rebuild_dict['imageRef']
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {'name': 'display_name', 'metadata': 'metadata'}
rebuild_kwargs = {}
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point, rebuild_dict, rebuild_kwargs)
for (request_attribute, instance_attribute) in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context, instance, image_href, password, **rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'rebuild', id)
except exception.InstanceNotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)
except exception.ImageNotFound:
msg = _('Cannot find image for rebuild')
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj) | @wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild)
def _action_rebuild(self, req, id, body):
rebuild_dict = body['rebuild']
image_href = rebuild_dict['imageRef']
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {'name': 'display_name', 'metadata': 'metadata'}
rebuild_kwargs = {}
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point, rebuild_dict, rebuild_kwargs)
for (request_attribute, instance_attribute) in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context, instance, image_href, password, **rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'rebuild', id)
except exception.InstanceNotFound:
msg = _('Instance could not be found')
raise exc.HTTPNotFound(explanation=msg)
except exception.ImageNotFound:
msg = _('Cannot find image for rebuild')
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)<|docstring|>Rebuild an instance with the given attributes.<|endoftext|> |
05dea88617f99d0ec29f72c18ceb2135e71b86c8a670cd21b4b37c1626e3be31 | @wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('createImage')
@common.check_snapshots_enabled
@validation.schema(schema_servers.create_image)
def _action_create_image(self, req, id, body):
'Snapshot a server instance.'
context = req.environ['nova.context']
entity = body['createImage']
image_name = entity['name']
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance, bdms):
img = instance.image_ref
if (not img):
properties = bdms.root_metadata(context, self.compute_api.image_api, self.compute_api.volume_api)
image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
image = self.compute_api.snapshot_volume_backed(context, instance, image_meta, image_name, extra_properties=metadata)
else:
image = self.compute_api.snapshot(context, instance, image_name, extra_properties=metadata)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp | Snapshot a server instance. | nova/api/openstack/compute/plugins/v3/servers.py | _action_create_image | orbitfp7/nova | 5 | python | @wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('createImage')
@common.check_snapshots_enabled
@validation.schema(schema_servers.create_image)
def _action_create_image(self, req, id, body):
context = req.environ['nova.context']
entity = body['createImage']
image_name = entity['name']
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance, bdms):
img = instance.image_ref
if (not img):
properties = bdms.root_metadata(context, self.compute_api.image_api, self.compute_api.volume_api)
image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
image = self.compute_api.snapshot_volume_backed(context, instance, image_meta, image_name, extra_properties=metadata)
else:
image = self.compute_api.snapshot(context, instance, image_name, extra_properties=metadata)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp | @wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('createImage')
@common.check_snapshots_enabled
@validation.schema(schema_servers.create_image)
def _action_create_image(self, req, id, body):
context = req.environ['nova.context']
entity = body['createImage']
image_name = entity['name']
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance, bdms):
img = instance.image_ref
if (not img):
properties = bdms.root_metadata(context, self.compute_api.image_api, self.compute_api.volume_api)
image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
image = self.compute_api.snapshot_volume_backed(context, instance, image_meta, image_name, extra_properties=metadata)
else:
image = self.compute_api.snapshot(context, instance, image_name, extra_properties=metadata)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp<|docstring|>Snapshot a server instance.<|endoftext|> |
2cb271e678a2fc620c5a102b70aff9740609d9b3fb1867e023f71be918c643bf | def _get_server_admin_password(self, server):
'Determine the admin password for a server on creation.'
try:
password = server['adminPass']
except KeyError:
password = utils.generate_password()
return password | Determine the admin password for a server on creation. | nova/api/openstack/compute/plugins/v3/servers.py | _get_server_admin_password | orbitfp7/nova | 5 | python | def _get_server_admin_password(self, server):
try:
password = server['adminPass']
except KeyError:
password = utils.generate_password()
return password | def _get_server_admin_password(self, server):
try:
password = server['adminPass']
except KeyError:
password = utils.generate_password()
return password<|docstring|>Determine the admin password for a server on creation.<|endoftext|> |
5765123e7b1b6673244adb700e6a6ab96c241bdb33f4cdafd5a6e625d9723092 | def _get_server_search_options(self):
'Return server search options allowed by non-admin.'
return ('reservation_id', 'name', 'status', 'image', 'flavor', 'ip', 'changes-since', 'all_tenants') | Return server search options allowed by non-admin. | nova/api/openstack/compute/plugins/v3/servers.py | _get_server_search_options | orbitfp7/nova | 5 | python | def _get_server_search_options(self):
return ('reservation_id', 'name', 'status', 'image', 'flavor', 'ip', 'changes-since', 'all_tenants') | def _get_server_search_options(self):
return ('reservation_id', 'name', 'status', 'image', 'flavor', 'ip', 'changes-since', 'all_tenants')<|docstring|>Return server search options allowed by non-admin.<|endoftext|> |
0906f14a861671fa13b82257be8d3b5ac0249980d226e1aecd3bc08404a647b3 | @wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
'Start an instance.'
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'start')
LOG.debug('start instance', instance=instance)
try:
self.compute_api.start(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'start', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message()) | Start an instance. | nova/api/openstack/compute/plugins/v3/servers.py | _start_server | orbitfp7/nova | 5 | python | @wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'start')
LOG.debug('start instance', instance=instance)
try:
self.compute_api.start(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'start', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message()) | @wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'start')
LOG.debug('start instance', instance=instance)
try:
self.compute_api.start(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'start', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())<|docstring|>Start an instance.<|endoftext|> |
4ba5347a5c9743a29f5675bf61f3f6195320ceaaa90353d34756464e4b1f10e5 | @wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
'Stop an instance.'
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'stop')
LOG.debug('stop instance', instance=instance)
try:
self.compute_api.stop(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'stop', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message()) | Stop an instance. | nova/api/openstack/compute/plugins/v3/servers.py | _stop_server | orbitfp7/nova | 5 | python | @wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'stop')
LOG.debug('stop instance', instance=instance)
try:
self.compute_api.stop(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'stop', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message()) | @wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'stop')
LOG.debug('stop instance', instance=instance)
try:
self.compute_api.stop(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'stop', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())<|docstring|>Stop an instance.<|endoftext|> |
262b9341d2d0d88d53f3873242395202e66cda630cb5b79b07a6bac00c2e218a | def has_handle(fpath):
'\n Returns true if the file is in use by a process\n '
for proc in psutil.process_iter():
try:
for item in proc.open_files():
if (fpath == item.path):
return True
except Exception:
pass
return False | Returns true if the file is in use by a process | backend/galvanalyser/harvester/harvester.py | has_handle | Battery-Intelligence-Lab/galvanalyser | 13 | python | def has_handle(fpath):
'\n \n '
for proc in psutil.process_iter():
try:
for item in proc.open_files():
if (fpath == item.path):
return True
except Exception:
pass
return False | def has_handle(fpath):
'\n \n '
for proc in psutil.process_iter():
try:
for item in proc.open_files():
if (fpath == item.path):
return True
except Exception:
pass
return False<|docstring|>Returns true if the file is in use by a process<|endoftext|> |
af6abd34d74df4e82232302fc9cff3d3b7198accd8bd88938b90c3b4b022c2c7 | def import_file(base_path, file_path_row, harvester_name, conn):
'\n Attempts to import a given file\n '
absolute_path = file_path_row.monitored_path
if ((not os.path.isabs(absolute_path)) and (base_path is not None)):
absolute_path = os.path.join(base_path, absolute_path)
fullpath = os.path.join(absolute_path, file_path_row.observed_path)
print('')
if (not os.path.isfile(fullpath)):
print(('Is not a file, skipping: ' + fullpath))
return
print(('Importing ' + fullpath))
rows_updated = file_path_row.update_observed_file_state_if_state_is('IMPORTING', 'STABLE', conn)
if (rows_updated == 0):
print('File was not stable as expected, skipping import')
return
try:
input_file = None
for input_file_cls in registered_input_files:
try:
print('Tried input reader {}'.format(input_file_cls))
input_file = input_file_cls(fullpath)
except Exception as e:
print('...failed with: ', type(e), e)
else:
print('...succeeded...')
break
if (input_file is None):
raise UnsupportedFileTypeError
conn.autocommit = False
with conn:
dataset_row = DatasetRow.select_from_name_date(name=input_file.metadata['Dataset Name'], date=input_file.metadata['Date of Test'], conn=conn)
is_new_dataset = (dataset_row is None)
last_data = None
if is_new_dataset:
dataset_row = DatasetRow(name=input_file.metadata['Dataset Name'], date=input_file.metadata['Date of Test'], dataset_type=input_file.metadata['Machine Type'])
dataset_row.insert(conn)
print(('Added dataset id ' + str(dataset_row.id)))
else:
print('This dataset is already in the database')
last_data = TimeseriesDataRow.select_latest_by_dataset_id(dataset_row.id, conn)
last_sample_no = max([ts_row.sample_no for ts_row in last_data])
print('last sample number in database = {}'.format(last_sample_no))
print('last sample number in file = {}'.format(input_file.metadata['last_sample_no']))
dataset_id = dataset_row.id
for user_id in file_path_row.monitored_for:
print(' Allowing access to user id', user_id)
access_row = AccessRow(dataset_id=dataset_id, user_id=user_id)
access_row.insert(conn)
input_file.metadata['dataset_id'] = dataset_id
new_data = True
if is_new_dataset:
print('Inserting Data')
TimeseriesDataRow.insert_input_file(input_file, dataset_id, conn)
print('Finished inserting Data')
elif (last_sample_no < input_file.metadata['last_sample_no']):
print('Inserting Additional Data')
TimeseriesDataRow.insert_input_file(input_file, dataset_id, conn, last_values=last_data)
print('Finished Additional Data')
else:
print('Dataset already in database')
new_data = False
if new_data:
RangeLabelRow(dataset_id, 'all', int(input_file.metadata['first_sample_no']), (int(input_file.metadata['last_sample_no']) + 1)).insert(conn)
for (label, sample_range) in input_file.get_data_labels():
print('inserting {}'.format(label))
RangeLabelRow(dataset_id, label, sample_range[0], sample_range[1]).insert(conn)
if ('misc_file_data' in input_file.metadata):
json_dict = input_file.metadata['misc_file_data']
print('Storing misc file metadata')
dataset_row.json_data = json_dict
dataset_row.update(conn, update_equipment=False)
file_path_row.update_observed_file_state('IMPORTED', conn)
print('File successfully imported')
except Exception as e:
conn.autocommit = True
file_path_row.update_observed_file_state('IMPORT_FAILED', conn)
print(('Import failed for ' + fullpath))
traceback.print_exc()
finally:
conn.autocommit = True | Attempts to import a given file | backend/galvanalyser/harvester/harvester.py | import_file | Battery-Intelligence-Lab/galvanalyser | 13 | python | def import_file(base_path, file_path_row, harvester_name, conn):
'\n \n '
absolute_path = file_path_row.monitored_path
if ((not os.path.isabs(absolute_path)) and (base_path is not None)):
absolute_path = os.path.join(base_path, absolute_path)
fullpath = os.path.join(absolute_path, file_path_row.observed_path)
print()
if (not os.path.isfile(fullpath)):
print(('Is not a file, skipping: ' + fullpath))
return
print(('Importing ' + fullpath))
rows_updated = file_path_row.update_observed_file_state_if_state_is('IMPORTING', 'STABLE', conn)
if (rows_updated == 0):
print('File was not stable as expected, skipping import')
return
try:
input_file = None
for input_file_cls in registered_input_files:
try:
print('Tried input reader {}'.format(input_file_cls))
input_file = input_file_cls(fullpath)
except Exception as e:
print('...failed with: ', type(e), e)
else:
print('...succeeded...')
break
if (input_file is None):
raise UnsupportedFileTypeError
conn.autocommit = False
with conn:
dataset_row = DatasetRow.select_from_name_date(name=input_file.metadata['Dataset Name'], date=input_file.metadata['Date of Test'], conn=conn)
is_new_dataset = (dataset_row is None)
last_data = None
if is_new_dataset:
dataset_row = DatasetRow(name=input_file.metadata['Dataset Name'], date=input_file.metadata['Date of Test'], dataset_type=input_file.metadata['Machine Type'])
dataset_row.insert(conn)
print(('Added dataset id ' + str(dataset_row.id)))
else:
print('This dataset is already in the database')
last_data = TimeseriesDataRow.select_latest_by_dataset_id(dataset_row.id, conn)
last_sample_no = max([ts_row.sample_no for ts_row in last_data])
print('last sample number in database = {}'.format(last_sample_no))
print('last sample number in file = {}'.format(input_file.metadata['last_sample_no']))
dataset_id = dataset_row.id
for user_id in file_path_row.monitored_for:
print(' Allowing access to user id', user_id)
access_row = AccessRow(dataset_id=dataset_id, user_id=user_id)
access_row.insert(conn)
input_file.metadata['dataset_id'] = dataset_id
new_data = True
if is_new_dataset:
print('Inserting Data')
TimeseriesDataRow.insert_input_file(input_file, dataset_id, conn)
print('Finished inserting Data')
elif (last_sample_no < input_file.metadata['last_sample_no']):
print('Inserting Additional Data')
TimeseriesDataRow.insert_input_file(input_file, dataset_id, conn, last_values=last_data)
print('Finished Additional Data')
else:
print('Dataset already in database')
new_data = False
if new_data:
RangeLabelRow(dataset_id, 'all', int(input_file.metadata['first_sample_no']), (int(input_file.metadata['last_sample_no']) + 1)).insert(conn)
for (label, sample_range) in input_file.get_data_labels():
print('inserting {}'.format(label))
RangeLabelRow(dataset_id, label, sample_range[0], sample_range[1]).insert(conn)
if ('misc_file_data' in input_file.metadata):
json_dict = input_file.metadata['misc_file_data']
print('Storing misc file metadata')
dataset_row.json_data = json_dict
dataset_row.update(conn, update_equipment=False)
file_path_row.update_observed_file_state('IMPORTED', conn)
print('File successfully imported')
except Exception as e:
conn.autocommit = True
file_path_row.update_observed_file_state('IMPORT_FAILED', conn)
print(('Import failed for ' + fullpath))
traceback.print_exc()
finally:
conn.autocommit = True | def import_file(base_path, file_path_row, harvester_name, conn):
'\n \n '
absolute_path = file_path_row.monitored_path
if ((not os.path.isabs(absolute_path)) and (base_path is not None)):
absolute_path = os.path.join(base_path, absolute_path)
fullpath = os.path.join(absolute_path, file_path_row.observed_path)
print()
if (not os.path.isfile(fullpath)):
print(('Is not a file, skipping: ' + fullpath))
return
print(('Importing ' + fullpath))
rows_updated = file_path_row.update_observed_file_state_if_state_is('IMPORTING', 'STABLE', conn)
if (rows_updated == 0):
print('File was not stable as expected, skipping import')
return
try:
input_file = None
for input_file_cls in registered_input_files:
try:
print('Tried input reader {}'.format(input_file_cls))
input_file = input_file_cls(fullpath)
except Exception as e:
print('...failed with: ', type(e), e)
else:
print('...succeeded...')
break
if (input_file is None):
raise UnsupportedFileTypeError
conn.autocommit = False
with conn:
dataset_row = DatasetRow.select_from_name_date(name=input_file.metadata['Dataset Name'], date=input_file.metadata['Date of Test'], conn=conn)
is_new_dataset = (dataset_row is None)
last_data = None
if is_new_dataset:
dataset_row = DatasetRow(name=input_file.metadata['Dataset Name'], date=input_file.metadata['Date of Test'], dataset_type=input_file.metadata['Machine Type'])
dataset_row.insert(conn)
print(('Added dataset id ' + str(dataset_row.id)))
else:
print('This dataset is already in the database')
last_data = TimeseriesDataRow.select_latest_by_dataset_id(dataset_row.id, conn)
last_sample_no = max([ts_row.sample_no for ts_row in last_data])
print('last sample number in database = {}'.format(last_sample_no))
print('last sample number in file = {}'.format(input_file.metadata['last_sample_no']))
dataset_id = dataset_row.id
for user_id in file_path_row.monitored_for:
print(' Allowing access to user id', user_id)
access_row = AccessRow(dataset_id=dataset_id, user_id=user_id)
access_row.insert(conn)
input_file.metadata['dataset_id'] = dataset_id
new_data = True
if is_new_dataset:
print('Inserting Data')
TimeseriesDataRow.insert_input_file(input_file, dataset_id, conn)
print('Finished inserting Data')
elif (last_sample_no < input_file.metadata['last_sample_no']):
print('Inserting Additional Data')
TimeseriesDataRow.insert_input_file(input_file, dataset_id, conn, last_values=last_data)
print('Finished Additional Data')
else:
print('Dataset already in database')
new_data = False
if new_data:
RangeLabelRow(dataset_id, 'all', int(input_file.metadata['first_sample_no']), (int(input_file.metadata['last_sample_no']) + 1)).insert(conn)
for (label, sample_range) in input_file.get_data_labels():
print('inserting {}'.format(label))
RangeLabelRow(dataset_id, label, sample_range[0], sample_range[1]).insert(conn)
if ('misc_file_data' in input_file.metadata):
json_dict = input_file.metadata['misc_file_data']
print('Storing misc file metadata')
dataset_row.json_data = json_dict
dataset_row.update(conn, update_equipment=False)
file_path_row.update_observed_file_state('IMPORTED', conn)
print('File successfully imported')
except Exception as e:
conn.autocommit = True
file_path_row.update_observed_file_state('IMPORT_FAILED', conn)
print(('Import failed for ' + fullpath))
traceback.print_exc()
finally:
conn.autocommit = True<|docstring|>Attempts to import a given file<|endoftext|> |
ba354d9dd4f19d567b1127b21a129ba8b1cd57f3e5c3ebda409b6577cc7f3bea | def __init__(self, config, params, dataset, iterators):
'Constructs the graph and training/summary ops.'
self.iter = iterators
self.config = config
self.params = params
self.dataset = dataset
self.learning_rate = tf.constant(params['learning_rate'])
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.global_step = tf.Variable(0, trainable=False)
source_name = dataset.input_varname()
(self.input_text, _, _) = self.iter[source_name]
with tf.variable_scope('input'):
input_vector = tf.map_fn((lambda seq: tf_utils.sparse_to_dense_vector(seq, self.dataset.vocab_size)), self.iter[dataset.input_varname()][1])
input_encoded = tf_utils.fc_tube(inputs=tf.cast(input_vector, tf.float32), num_outputs=self.params['encoder_layers'], layers=self.params['encoder_layers'])
cur_graph = tf.get_default_graph()
self.feature_weights = cur_graph.get_tensor_by_name('input/layer_0/weights:0')
self.feature_intercept = cur_graph.get_tensor_by_name('input/layer_0/biases:0')
self.step_output = defaultdict(dict)
for variable in self.config.data_spec[1:]:
if variable['skip']:
continue
with tf.variable_scope((variable['name'] + '_prediction_head')):
if variable['control']:
prediction_input = self.reverse(input_encoded)
else:
prediction_input = tf.identity(input_encoded)
if (variable['type'] == utils.CATEGORICAL):
(preds, mean_loss) = tf_utils.classifier(inputs=prediction_input, labels=self.iter[variable['name']], layers=self.params['classifier_layers'], num_classes=self.dataset.num_levels(variable['name']), hidden=self.params['classifier_units'], dropout=self.dropout, sparse_labels=True)
elif (variable['type'] == utils.CONTINUOUS):
(preds, mean_loss) = tf_utils.regressor(inputs=prediction_input, labels=self.iter[variable['name']], layers=self.params['regressor_layers'], hidden=self.params['regressor_units'], dropout=self.dropout)
else:
raise Exception(('ERROR: unknown type %s for variable %s' % (variable['type'], variable['name'])))
mean_loss = tf.scalar_mul(variable['weight'], mean_loss)
tf.summary.scalar(('%s_loss' % variable['name']), mean_loss)
self.step_output[variable['name']]['input'] = self.iter[variable['name']]
self.step_output[variable['name']]['loss'] = mean_loss
self.step_output[variable['name']]['pred'] = preds
if (self.params['lambda'] > 0):
if (self.params['regularizer'] == 'l2'):
regularizer = tf.contrib.layers.l2_regularizer(self.params['lambda'])
else:
regularizer = tf.contrib.layers.l1_regularizer(self.params['lambda'])
if (self.params['reg_type'] == 'all'):
regularization_weights = tf.trainable_variables()
else:
regularization_weights = [self.feature_weights]
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularization_weights)
else:
regularization_term = 0
tf.summary.scalar('regularization_loss', regularization_term)
self.loss = tf.reduce_sum([x['loss'] for x in self.step_output.values()])
self.loss += regularization_term
tf.summary.scalar('global_loss', self.loss)
self.train_step = tf.contrib.layers.optimize_loss(loss=self.loss, global_step=self.global_step, learning_rate=self.learning_rate, clip_gradients=self.params['gradient_clip'], optimizer='Adam', summaries=['gradient_norm'])
self.trainable_variable_names = [v.name for v in tf.trainable_variables()]
self.summaries = tf.summary.merge_all()
self.saver = tf.train.Saver(tf.global_variables()) | Constructs the graph and training/summary ops. | text-performance-attribution/src/models/neural/a_bow.py | __init__ | mathcass/deconfounded_lexicon_induction | 21 | python | def __init__(self, config, params, dataset, iterators):
self.iter = iterators
self.config = config
self.params = params
self.dataset = dataset
self.learning_rate = tf.constant(params['learning_rate'])
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.global_step = tf.Variable(0, trainable=False)
source_name = dataset.input_varname()
(self.input_text, _, _) = self.iter[source_name]
with tf.variable_scope('input'):
input_vector = tf.map_fn((lambda seq: tf_utils.sparse_to_dense_vector(seq, self.dataset.vocab_size)), self.iter[dataset.input_varname()][1])
input_encoded = tf_utils.fc_tube(inputs=tf.cast(input_vector, tf.float32), num_outputs=self.params['encoder_layers'], layers=self.params['encoder_layers'])
cur_graph = tf.get_default_graph()
self.feature_weights = cur_graph.get_tensor_by_name('input/layer_0/weights:0')
self.feature_intercept = cur_graph.get_tensor_by_name('input/layer_0/biases:0')
self.step_output = defaultdict(dict)
for variable in self.config.data_spec[1:]:
if variable['skip']:
continue
with tf.variable_scope((variable['name'] + '_prediction_head')):
if variable['control']:
prediction_input = self.reverse(input_encoded)
else:
prediction_input = tf.identity(input_encoded)
if (variable['type'] == utils.CATEGORICAL):
(preds, mean_loss) = tf_utils.classifier(inputs=prediction_input, labels=self.iter[variable['name']], layers=self.params['classifier_layers'], num_classes=self.dataset.num_levels(variable['name']), hidden=self.params['classifier_units'], dropout=self.dropout, sparse_labels=True)
elif (variable['type'] == utils.CONTINUOUS):
(preds, mean_loss) = tf_utils.regressor(inputs=prediction_input, labels=self.iter[variable['name']], layers=self.params['regressor_layers'], hidden=self.params['regressor_units'], dropout=self.dropout)
else:
raise Exception(('ERROR: unknown type %s for variable %s' % (variable['type'], variable['name'])))
mean_loss = tf.scalar_mul(variable['weight'], mean_loss)
tf.summary.scalar(('%s_loss' % variable['name']), mean_loss)
self.step_output[variable['name']]['input'] = self.iter[variable['name']]
self.step_output[variable['name']]['loss'] = mean_loss
self.step_output[variable['name']]['pred'] = preds
if (self.params['lambda'] > 0):
if (self.params['regularizer'] == 'l2'):
regularizer = tf.contrib.layers.l2_regularizer(self.params['lambda'])
else:
regularizer = tf.contrib.layers.l1_regularizer(self.params['lambda'])
if (self.params['reg_type'] == 'all'):
regularization_weights = tf.trainable_variables()
else:
regularization_weights = [self.feature_weights]
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularization_weights)
else:
regularization_term = 0
tf.summary.scalar('regularization_loss', regularization_term)
self.loss = tf.reduce_sum([x['loss'] for x in self.step_output.values()])
self.loss += regularization_term
tf.summary.scalar('global_loss', self.loss)
self.train_step = tf.contrib.layers.optimize_loss(loss=self.loss, global_step=self.global_step, learning_rate=self.learning_rate, clip_gradients=self.params['gradient_clip'], optimizer='Adam', summaries=['gradient_norm'])
self.trainable_variable_names = [v.name for v in tf.trainable_variables()]
self.summaries = tf.summary.merge_all()
self.saver = tf.train.Saver(tf.global_variables()) | def __init__(self, config, params, dataset, iterators):
self.iter = iterators
self.config = config
self.params = params
self.dataset = dataset
self.learning_rate = tf.constant(params['learning_rate'])
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.global_step = tf.Variable(0, trainable=False)
source_name = dataset.input_varname()
(self.input_text, _, _) = self.iter[source_name]
with tf.variable_scope('input'):
input_vector = tf.map_fn((lambda seq: tf_utils.sparse_to_dense_vector(seq, self.dataset.vocab_size)), self.iter[dataset.input_varname()][1])
input_encoded = tf_utils.fc_tube(inputs=tf.cast(input_vector, tf.float32), num_outputs=self.params['encoder_layers'], layers=self.params['encoder_layers'])
cur_graph = tf.get_default_graph()
self.feature_weights = cur_graph.get_tensor_by_name('input/layer_0/weights:0')
self.feature_intercept = cur_graph.get_tensor_by_name('input/layer_0/biases:0')
self.step_output = defaultdict(dict)
for variable in self.config.data_spec[1:]:
if variable['skip']:
continue
with tf.variable_scope((variable['name'] + '_prediction_head')):
if variable['control']:
prediction_input = self.reverse(input_encoded)
else:
prediction_input = tf.identity(input_encoded)
if (variable['type'] == utils.CATEGORICAL):
(preds, mean_loss) = tf_utils.classifier(inputs=prediction_input, labels=self.iter[variable['name']], layers=self.params['classifier_layers'], num_classes=self.dataset.num_levels(variable['name']), hidden=self.params['classifier_units'], dropout=self.dropout, sparse_labels=True)
elif (variable['type'] == utils.CONTINUOUS):
(preds, mean_loss) = tf_utils.regressor(inputs=prediction_input, labels=self.iter[variable['name']], layers=self.params['regressor_layers'], hidden=self.params['regressor_units'], dropout=self.dropout)
else:
raise Exception(('ERROR: unknown type %s for variable %s' % (variable['type'], variable['name'])))
mean_loss = tf.scalar_mul(variable['weight'], mean_loss)
tf.summary.scalar(('%s_loss' % variable['name']), mean_loss)
self.step_output[variable['name']]['input'] = self.iter[variable['name']]
self.step_output[variable['name']]['loss'] = mean_loss
self.step_output[variable['name']]['pred'] = preds
if (self.params['lambda'] > 0):
if (self.params['regularizer'] == 'l2'):
regularizer = tf.contrib.layers.l2_regularizer(self.params['lambda'])
else:
regularizer = tf.contrib.layers.l1_regularizer(self.params['lambda'])
if (self.params['reg_type'] == 'all'):
regularization_weights = tf.trainable_variables()
else:
regularization_weights = [self.feature_weights]
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularization_weights)
else:
regularization_term = 0
tf.summary.scalar('regularization_loss', regularization_term)
self.loss = tf.reduce_sum([x['loss'] for x in self.step_output.values()])
self.loss += regularization_term
tf.summary.scalar('global_loss', self.loss)
self.train_step = tf.contrib.layers.optimize_loss(loss=self.loss, global_step=self.global_step, learning_rate=self.learning_rate, clip_gradients=self.params['gradient_clip'], optimizer='Adam', summaries=['gradient_norm'])
self.trainable_variable_names = [v.name for v in tf.trainable_variables()]
self.summaries = tf.summary.merge_all()
self.saver = tf.train.Saver(tf.global_variables())<|docstring|>Constructs the graph and training/summary ops.<|endoftext|> |
09f201c8d863f88fdd3ac738a54a2b5da770cc209f74be94123385eff50396e5 | def reverse(self, in_tensor):
'Reverses the gradients of a tensor of any shape.'
input_shape = in_tensor.get_shape()
out_tensor = reverse_grad(in_tensor)
out_tensor.set_shape(input_shape)
return out_tensor | Reverses the gradients of a tensor of any shape. | text-performance-attribution/src/models/neural/a_bow.py | reverse | mathcass/deconfounded_lexicon_induction | 21 | python | def reverse(self, in_tensor):
input_shape = in_tensor.get_shape()
out_tensor = reverse_grad(in_tensor)
out_tensor.set_shape(input_shape)
return out_tensor | def reverse(self, in_tensor):
input_shape = in_tensor.get_shape()
out_tensor = reverse_grad(in_tensor)
out_tensor.set_shape(input_shape)
return out_tensor<|docstring|>Reverses the gradients of a tensor of any shape.<|endoftext|> |
64bb9a7140d440e7f322f03edba7fe497d36e9d211310f88eea8efa0ebf350a1 | def train(self, sess):
'Trains for a batch.'
ops = [self.global_step, self.train_step, self.summaries]
return sess.run(ops, feed_dict={self.dropout: self.params['dropout']}) | Trains for a batch. | text-performance-attribution/src/models/neural/a_bow.py | train | mathcass/deconfounded_lexicon_induction | 21 | python | def train(self, sess):
ops = [self.global_step, self.train_step, self.summaries]
return sess.run(ops, feed_dict={self.dropout: self.params['dropout']}) | def train(self, sess):
ops = [self.global_step, self.train_step, self.summaries]
return sess.run(ops, feed_dict={self.dropout: self.params['dropout']})<|docstring|>Trains for a batch.<|endoftext|> |
e5a1d942bfe5afffed0668086d3f6ad877fccb559ded8c7dab37d954fed263e8 | def inference_on_batch(self, sess):
'Performs inference on a batch of inputs.\n\n Args:\n sess: tf.Session, the current TensorFlow session.\n\n Returns:\n predictions: dict(string => list(float) or list(list(float)). A mapping\n from variable to predictions or logits for each example in the batch.\n token_importance: dict(string => dict(string => list(float))) or\n dict(string => dict(string => dict(string => list(float)))).\n For continuous variables:\n variable name => feature name => list of attention scores.\n For categorical variables:\n variable name => level => feature name => list of attention scores\n on true positives ONLY.\n '
return self.bow_model_inference(sess, self.feature_weights, self.step_output) | Performs inference on a batch of inputs.
Args:
sess: tf.Session, the current TensorFlow session.
Returns:
predictions: dict(string => list(float) or list(list(float)). A mapping
from variable to predictions or logits for each example in the batch.
token_importance: dict(string => dict(string => list(float))) or
dict(string => dict(string => dict(string => list(float)))).
For continuous variables:
variable name => feature name => list of attention scores.
For categorical variables:
variable name => level => feature name => list of attention scores
on true positives ONLY. | text-performance-attribution/src/models/neural/a_bow.py | inference_on_batch | mathcass/deconfounded_lexicon_induction | 21 | python | def inference_on_batch(self, sess):
'Performs inference on a batch of inputs.\n\n Args:\n sess: tf.Session, the current TensorFlow session.\n\n Returns:\n predictions: dict(string => list(float) or list(list(float)). A mapping\n from variable to predictions or logits for each example in the batch.\n token_importance: dict(string => dict(string => list(float))) or\n dict(string => dict(string => dict(string => list(float)))).\n For continuous variables:\n variable name => feature name => list of attention scores.\n For categorical variables:\n variable name => level => feature name => list of attention scores\n on true positives ONLY.\n '
return self.bow_model_inference(sess, self.feature_weights, self.step_output) | def inference_on_batch(self, sess):
'Performs inference on a batch of inputs.\n\n Args:\n sess: tf.Session, the current TensorFlow session.\n\n Returns:\n predictions: dict(string => list(float) or list(list(float)). A mapping\n from variable to predictions or logits for each example in the batch.\n token_importance: dict(string => dict(string => list(float))) or\n dict(string => dict(string => dict(string => list(float)))).\n For continuous variables:\n variable name => feature name => list of attention scores.\n For categorical variables:\n variable name => level => feature name => list of attention scores\n on true positives ONLY.\n '
return self.bow_model_inference(sess, self.feature_weights, self.step_output)<|docstring|>Performs inference on a batch of inputs.
Args:
sess: tf.Session, the current TensorFlow session.
Returns:
predictions: dict(string => list(float) or list(list(float)). A mapping
from variable to predictions or logits for each example in the batch.
token_importance: dict(string => dict(string => list(float))) or
dict(string => dict(string => dict(string => list(float)))).
For continuous variables:
variable name => feature name => list of attention scores.
For categorical variables:
variable name => level => feature name => list of attention scores
on true positives ONLY.<|endoftext|> |
58c0335137989204b6e6d909e2bf3d66a9991f840a17b0b3a48781f46c616a9b | @property
def ImportTargetList(self):
'DEPRECATED \n Returns\n -------\n - list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number)): Configures a target attribute to be associated with advertised L3 VPN route ranges.\n '
return self._get_attribute(self._SDM_ATT_MAP['ImportTargetList']) | DEPRECATED
Returns
-------
- list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number)): Configures a target attribute to be associated with advertised L3 VPN route ranges. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/importtarget_5de62449ab162506e7d4343bed6cdae9.py | ImportTargetList | OpenIxia/ixnetwork_restpy | 20 | python | @property
def ImportTargetList(self):
'DEPRECATED \n Returns\n -------\n - list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number)): Configures a target attribute to be associated with advertised L3 VPN route ranges.\n '
return self._get_attribute(self._SDM_ATT_MAP['ImportTargetList']) | @property
def ImportTargetList(self):
'DEPRECATED \n Returns\n -------\n - list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number)): Configures a target attribute to be associated with advertised L3 VPN route ranges.\n '
return self._get_attribute(self._SDM_ATT_MAP['ImportTargetList'])<|docstring|>DEPRECATED
Returns
-------
- list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number)): Configures a target attribute to be associated with advertised L3 VPN route ranges.<|endoftext|> |
cf314c44866a3b82f1216d1a798272db168dbccc2d0be1f21b9ef624ab515089 | @property
def ImportTargetListEx(self):
'\n Returns\n -------\n - list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str)): Configures a list of export targets to be associated with advertised L3 VPN routeranges.\n '
return self._get_attribute(self._SDM_ATT_MAP['ImportTargetListEx']) | Returns
-------
- list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str)): Configures a list of export targets to be associated with advertised L3 VPN routeranges. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/importtarget_5de62449ab162506e7d4343bed6cdae9.py | ImportTargetListEx | OpenIxia/ixnetwork_restpy | 20 | python | @property
def ImportTargetListEx(self):
'\n Returns\n -------\n - list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str)): Configures a list of export targets to be associated with advertised L3 VPN routeranges.\n '
return self._get_attribute(self._SDM_ATT_MAP['ImportTargetListEx']) | @property
def ImportTargetListEx(self):
'\n Returns\n -------\n - list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str)): Configures a list of export targets to be associated with advertised L3 VPN routeranges.\n '
return self._get_attribute(self._SDM_ATT_MAP['ImportTargetListEx'])<|docstring|>Returns
-------
- list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str)): Configures a list of export targets to be associated with advertised L3 VPN routeranges.<|endoftext|> |
64a02e70771506588744f4795640f851d73eac591e29a3504edbdd8268d583df | def update(self, ImportTargetList=None, ImportTargetListEx=None):
'Updates importTarget resource on the server.\n\n Args\n ----\n - ImportTargetList (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number))): Configures a target attribute to be associated with advertised L3 VPN route ranges.\n - ImportTargetListEx (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str))): Configures a list of export targets to be associated with advertised L3 VPN routeranges.\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) | Updates importTarget resource on the server.
Args
----
- ImportTargetList (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number))): Configures a target attribute to be associated with advertised L3 VPN route ranges.
- ImportTargetListEx (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str))): Configures a list of export targets to be associated with advertised L3 VPN routeranges.
Raises
------
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/importtarget_5de62449ab162506e7d4343bed6cdae9.py | update | OpenIxia/ixnetwork_restpy | 20 | python | def update(self, ImportTargetList=None, ImportTargetListEx=None):
'Updates importTarget resource on the server.\n\n Args\n ----\n - ImportTargetList (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number))): Configures a target attribute to be associated with advertised L3 VPN route ranges.\n - ImportTargetListEx (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str))): Configures a list of export targets to be associated with advertised L3 VPN routeranges.\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) | def update(self, ImportTargetList=None, ImportTargetListEx=None):
'Updates importTarget resource on the server.\n\n Args\n ----\n - ImportTargetList (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number))): Configures a target attribute to be associated with advertised L3 VPN route ranges.\n - ImportTargetListEx (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str))): Configures a list of export targets to be associated with advertised L3 VPN routeranges.\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))<|docstring|>Updates importTarget resource on the server.
Args
----
- ImportTargetList (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number))): Configures a target attribute to be associated with advertised L3 VPN route ranges.
- ImportTargetListEx (list(dict(arg1:str[as | ip | asNumber2],arg2:number,arg3:str,arg4:number,arg5:number,arg6:number,arg7:str))): Configures a list of export targets to be associated with advertised L3 VPN routeranges.
Raises
------
- ServerError: The server has encountered an uncategorized error condition<|endoftext|> |
047dc3957755063c4e3d5c6df44def0ac3ec541db5673cacc7a3343360623be9 | def render_path(path_to_item):
'Returns a string representation of a path'
result = ''
for pth in path_to_item:
if isinstance(pth, six.integer_types):
result += '[{0}]'.format(pth)
else:
result += "['{0}']".format(pth)
return result | Returns a string representation of a path | backend/api/python_http_client/kfp_server_api/exceptions.py | render_path | cohere-ai/pipelines | 2,860 | python | def render_path(path_to_item):
result =
for pth in path_to_item:
if isinstance(pth, six.integer_types):
result += '[{0}]'.format(pth)
else:
result += "['{0}']".format(pth)
return result | def render_path(path_to_item):
result =
for pth in path_to_item:
if isinstance(pth, six.integer_types):
result += '[{0}]'.format(pth)
else:
result += "['{0}']".format(pth)
return result<|docstring|>Returns a string representation of a path<|endoftext|> |
8a3c9f387b28b2916a7109e3c9912e8b5e2ac0f797bb93fb29678d9842420955 | def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None):
' Raises an exception for TypeErrors\n\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (list): a list of keys an indices to get to the\n current_item\n None if unset\n valid_classes (tuple): the primitive classes that current item\n should be an instance of\n None if unset\n key_type (bool): False if our value is a value in a dict\n True if it is a key in a dict\n False if our item is an item in a list\n None if unset\n '
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg) | Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list): a list of keys an indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset | backend/api/python_http_client/kfp_server_api/exceptions.py | __init__ | cohere-ai/pipelines | 2,860 | python | def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None):
' Raises an exception for TypeErrors\n\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (list): a list of keys an indices to get to the\n current_item\n None if unset\n valid_classes (tuple): the primitive classes that current item\n should be an instance of\n None if unset\n key_type (bool): False if our value is a value in a dict\n True if it is a key in a dict\n False if our item is an item in a list\n None if unset\n '
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg) | def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None):
' Raises an exception for TypeErrors\n\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (list): a list of keys an indices to get to the\n current_item\n None if unset\n valid_classes (tuple): the primitive classes that current item\n should be an instance of\n None if unset\n key_type (bool): False if our value is a value in a dict\n True if it is a key in a dict\n False if our item is an item in a list\n None if unset\n '
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg)<|docstring|>Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list): a list of keys an indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset<|endoftext|> |
6a13eec693e1b80d6a8d0c102b4c058b9b77899327f4491852077fd5622a88b4 | def __init__(self, msg, path_to_item=None):
'\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (list) the path to the exception in the\n received_data dict. None if unset\n '
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg) | Args:
msg (str): the exception message
Keyword Args:
path_to_item (list) the path to the exception in the
received_data dict. None if unset | backend/api/python_http_client/kfp_server_api/exceptions.py | __init__ | cohere-ai/pipelines | 2,860 | python | def __init__(self, msg, path_to_item=None):
'\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (list) the path to the exception in the\n received_data dict. None if unset\n '
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg) | def __init__(self, msg, path_to_item=None):
'\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (list) the path to the exception in the\n received_data dict. None if unset\n '
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg)<|docstring|>Args:
msg (str): the exception message
Keyword Args:
path_to_item (list) the path to the exception in the
received_data dict. None if unset<|endoftext|> |
0921a10a62ed870531baa947690b22d707638f2b1f218d558ca8093dc3854927 | def __init__(self, msg, path_to_item=None):
'\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (None/list) the path to the exception in the\n received_data dict\n '
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg) | Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict | backend/api/python_http_client/kfp_server_api/exceptions.py | __init__ | cohere-ai/pipelines | 2,860 | python | def __init__(self, msg, path_to_item=None):
'\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (None/list) the path to the exception in the\n received_data dict\n '
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg) | def __init__(self, msg, path_to_item=None):
'\n Args:\n msg (str): the exception message\n\n Keyword Args:\n path_to_item (None/list) the path to the exception in the\n received_data dict\n '
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = '{0} at {1}'.format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg)<|docstring|>Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict<|endoftext|> |
6ecdc04646639ccb5fb46d055726d5d127af4eac8621e787a1853ba789e1378f | def __str__(self):
'Custom error messages for exception'
error_message = '({0})\nReason: {1}\n'.format(self.status, self.reason)
if self.headers:
error_message += 'HTTP response headers: {0}\n'.format(self.headers)
if self.body:
error_message += 'HTTP response body: {0}\n'.format(self.body)
return error_message | Custom error messages for exception | backend/api/python_http_client/kfp_server_api/exceptions.py | __str__ | cohere-ai/pipelines | 2,860 | python | def __str__(self):
error_message = '({0})\nReason: {1}\n'.format(self.status, self.reason)
if self.headers:
error_message += 'HTTP response headers: {0}\n'.format(self.headers)
if self.body:
error_message += 'HTTP response body: {0}\n'.format(self.body)
return error_message | def __str__(self):
error_message = '({0})\nReason: {1}\n'.format(self.status, self.reason)
if self.headers:
error_message += 'HTTP response headers: {0}\n'.format(self.headers)
if self.body:
error_message += 'HTTP response body: {0}\n'.format(self.body)
return error_message<|docstring|>Custom error messages for exception<|endoftext|> |
932079d3432a1adad3c4ff5e4dc508d29638f7ba5cdd733537f0295939a09a76 | def register(linter):
'Register the reporter classes with the linter.'
linter.register_reporter(JSONReporter) | Register the reporter classes with the linter. | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/json.py | register | YYTVicky/kafka | 35 | python | def register(linter):
linter.register_reporter(JSONReporter) | def register(linter):
linter.register_reporter(JSONReporter)<|docstring|>Register the reporter classes with the linter.<|endoftext|> |
36926124d1fb1214689abf25611f52d622060c65326491b1b3a196db63fa5bb0 | def handle_message(self, message):
'Manage message of different type and in the context of path.'
self.messages.append({'type': message.category, 'module': message.module, 'obj': message.obj, 'line': message.line, 'column': message.column, 'path': message.path, 'symbol': message.symbol, 'message': cgi.escape((message.msg or ''))}) | Manage message of different type and in the context of path. | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/json.py | handle_message | YYTVicky/kafka | 35 | python | def handle_message(self, message):
self.messages.append({'type': message.category, 'module': message.module, 'obj': message.obj, 'line': message.line, 'column': message.column, 'path': message.path, 'symbol': message.symbol, 'message': cgi.escape((message.msg or ))}) | def handle_message(self, message):
self.messages.append({'type': message.category, 'module': message.module, 'obj': message.obj, 'line': message.line, 'column': message.column, 'path': message.path, 'symbol': message.symbol, 'message': cgi.escape((message.msg or ))})<|docstring|>Manage message of different type and in the context of path.<|endoftext|> |
624359255b4d20719d8756409b5bfd4fee2e7309f487ecd4d3dbd0b0451242c9 | def display_messages(self, layout):
'Launch layouts display'
if self.messages:
print(json.dumps(self.messages, indent=4), file=self.out) | Launch layouts display | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/json.py | display_messages | YYTVicky/kafka | 35 | python | def display_messages(self, layout):
if self.messages:
print(json.dumps(self.messages, indent=4), file=self.out) | def display_messages(self, layout):
if self.messages:
print(json.dumps(self.messages, indent=4), file=self.out)<|docstring|>Launch layouts display<|endoftext|> |
3d457b8689aff51acc19520268110c829011a5181fa621a646d07b5c1d15c25a | def display_reports(self, _):
"Don't do nothing in this reporter." | Don't do nothing in this reporter. | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/json.py | display_reports | YYTVicky/kafka | 35 | python | def display_reports(self, _):
| def display_reports(self, _):
<|docstring|>Don't do nothing in this reporter.<|endoftext|> |
dd1bbc2ff3478712cf373fc12647d6508fb9958b11122dd8f23ac703bd933725 | def _display(self, layout):
"Don't do nothing." | Don't do nothing. | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/json.py | _display | YYTVicky/kafka | 35 | python | def _display(self, layout):
| def _display(self, layout):
<|docstring|>Don't do nothing.<|endoftext|> |
ec6c668a925d8de838c000330cf25b01548083d1c0fa4c8431ee5d457a5f6d60 | def __init__(self, lpdb):
'\n Generic class for the localpdb plugins\n :param lpdb: instance of the localpdb.PDB\n '
self.lpdb = lpdb
self.plugin_dir = (self.lpdb.db_path / self.plugin_dir)
self.plv = PluginVersioneer(self.plugin_dir)
self.plugin_version = None
self.set_version(self.plv.installed_plugin_versions)
self.history = self._get_historical_versions()
self.cp_files = []
if ((self.plugin_config['requires_pdb'] or self.plugin_config['requires_cif']) and (self.plugin_version is not None)):
(self.id_dict, self.map_dict) = self.lpdb._pdbv.adjust_pdb_ids({id_: id_ for id_ in self.lpdb.entries.index}, self.plugin_version) | Generic class for the localpdb plugins
:param lpdb: instance of the localpdb.PDB | localpdb/plugins/Plugin.py | __init__ | labstructbioinf/localpdb | 28 | python | def __init__(self, lpdb):
'\n Generic class for the localpdb plugins\n :param lpdb: instance of the localpdb.PDB\n '
self.lpdb = lpdb
self.plugin_dir = (self.lpdb.db_path / self.plugin_dir)
self.plv = PluginVersioneer(self.plugin_dir)
self.plugin_version = None
self.set_version(self.plv.installed_plugin_versions)
self.history = self._get_historical_versions()
self.cp_files = []
if ((self.plugin_config['requires_pdb'] or self.plugin_config['requires_cif']) and (self.plugin_version is not None)):
(self.id_dict, self.map_dict) = self.lpdb._pdbv.adjust_pdb_ids({id_: id_ for id_ in self.lpdb.entries.index}, self.plugin_version) | def __init__(self, lpdb):
'\n Generic class for the localpdb plugins\n :param lpdb: instance of the localpdb.PDB\n '
self.lpdb = lpdb
self.plugin_dir = (self.lpdb.db_path / self.plugin_dir)
self.plv = PluginVersioneer(self.plugin_dir)
self.plugin_version = None
self.set_version(self.plv.installed_plugin_versions)
self.history = self._get_historical_versions()
self.cp_files = []
if ((self.plugin_config['requires_pdb'] or self.plugin_config['requires_cif']) and (self.plugin_version is not None)):
(self.id_dict, self.map_dict) = self.lpdb._pdbv.adjust_pdb_ids({id_: id_ for id_ in self.lpdb.entries.index}, self.plugin_version)<|docstring|>Generic class for the localpdb plugins
:param lpdb: instance of the localpdb.PDB<|endoftext|> |
10808e50cfdca3e16b7d926e8ea4b3e79d9dbbc608ade10f28324b920111915b | def setup(self):
'\n Generic function for plugin setup - calls individual plugin _setup() method\n :return:\n '
if (self.plugin_config['available_historical_versions'] and self.plugin_config['allow_loading_outdated']):
self.set_version(list(self.history.keys()))
if (self.plugin_config['requires_pdb'] or self.plugin_config['requires_cif']):
if self.plugin_config['requires_pdb']:
self.lpdb.entries = self.lpdb.entries[self.lpdb.entries['pdb_fn'].notnull()]
self.lpdb.entries = self.lpdb.entries[(self.lpdb.entries['pdb_fn'] != 'not_compatible')]
if self.plugin_config['requires_cif']:
self.lpdb.entries = self.lpdb.entries[self.lpdb.entries['mmCIF_fn'].notnull()]
if (self.plugin_version not in self.plv.installed_plugin_versions):
try:
self._prep_paths()
info = self._setup()
self.plv.update_logs(version=self.plugin_version, additional_info=info)
if (self.plugin_version != self.lpdb.version):
logger.warning(((f"Installed plugin '{self.plugin_name}' version '{self.plugin_version}'" + f" does not match localpdb (version '{self.lpdb.version}') however plugin permits it.") + ' This is typical for plugins handling the data that is not released in a weekly cycle.'))
except:
self._cleanup()
raise PluginInstallError()
else:
logger.warning(((f"Installed plugin '{self.plugin_name}' version '{self.plugin_version}'" + f" does not match localpdb (version '{self.lpdb.version}') however plugin permits it.") + ' This is typical for plugins handling the data that is not released in a weekly cycle.'))
raise PluginAlreadyInstalledOutdated() | Generic function for plugin setup - calls individual plugin _setup() method
:return: | localpdb/plugins/Plugin.py | setup | labstructbioinf/localpdb | 28 | python | def setup(self):
'\n Generic function for plugin setup - calls individual plugin _setup() method\n :return:\n '
if (self.plugin_config['available_historical_versions'] and self.plugin_config['allow_loading_outdated']):
self.set_version(list(self.history.keys()))
if (self.plugin_config['requires_pdb'] or self.plugin_config['requires_cif']):
if self.plugin_config['requires_pdb']:
self.lpdb.entries = self.lpdb.entries[self.lpdb.entries['pdb_fn'].notnull()]
self.lpdb.entries = self.lpdb.entries[(self.lpdb.entries['pdb_fn'] != 'not_compatible')]
if self.plugin_config['requires_cif']:
self.lpdb.entries = self.lpdb.entries[self.lpdb.entries['mmCIF_fn'].notnull()]
if (self.plugin_version not in self.plv.installed_plugin_versions):
try:
self._prep_paths()
info = self._setup()
self.plv.update_logs(version=self.plugin_version, additional_info=info)
if (self.plugin_version != self.lpdb.version):
logger.warning(((f"Installed plugin '{self.plugin_name}' version '{self.plugin_version}'" + f" does not match localpdb (version '{self.lpdb.version}') however plugin permits it.") + ' This is typical for plugins handling the data that is not released in a weekly cycle.'))
except:
self._cleanup()
raise PluginInstallError()
else:
logger.warning(((f"Installed plugin '{self.plugin_name}' version '{self.plugin_version}'" + f" does not match localpdb (version '{self.lpdb.version}') however plugin permits it.") + ' This is typical for plugins handling the data that is not released in a weekly cycle.'))
raise PluginAlreadyInstalledOutdated() | def setup(self):
'\n Generic function for plugin setup - calls individual plugin _setup() method\n :return:\n '
if (self.plugin_config['available_historical_versions'] and self.plugin_config['allow_loading_outdated']):
self.set_version(list(self.history.keys()))
if (self.plugin_config['requires_pdb'] or self.plugin_config['requires_cif']):
if self.plugin_config['requires_pdb']:
self.lpdb.entries = self.lpdb.entries[self.lpdb.entries['pdb_fn'].notnull()]
self.lpdb.entries = self.lpdb.entries[(self.lpdb.entries['pdb_fn'] != 'not_compatible')]
if self.plugin_config['requires_cif']:
self.lpdb.entries = self.lpdb.entries[self.lpdb.entries['mmCIF_fn'].notnull()]
if (self.plugin_version not in self.plv.installed_plugin_versions):
try:
self._prep_paths()
info = self._setup()
self.plv.update_logs(version=self.plugin_version, additional_info=info)
if (self.plugin_version != self.lpdb.version):
logger.warning(((f"Installed plugin '{self.plugin_name}' version '{self.plugin_version}'" + f" does not match localpdb (version '{self.lpdb.version}') however plugin permits it.") + ' This is typical for plugins handling the data that is not released in a weekly cycle.'))
except:
self._cleanup()
raise PluginInstallError()
else:
logger.warning(((f"Installed plugin '{self.plugin_name}' version '{self.plugin_version}'" + f" does not match localpdb (version '{self.lpdb.version}') however plugin permits it.") + ' This is typical for plugins handling the data that is not released in a weekly cycle.'))
raise PluginAlreadyInstalledOutdated()<|docstring|>Generic function for plugin setup - calls individual plugin _setup() method
:return:<|endoftext|> |
20ec7bb90edacef518465d6abd9759c0174fa6383eba0a7b6aee86570d79e0c6 | def set_version(self, versions):
"\n Version handler for plugins.\n @param versions: list of versions to check.\n @return: If plugin allows loading an outdated version (plugin_config['allow_loading_outdated'] is True), it returns closest\n historical version out of versions passed in the list. If not it should return the exact version matching the lpdb version.\n If no suitable version is found returns None.\n "
if (not self.plugin_config['allow_loading_outdated']):
self.plugin_version = self.lpdb.version
else:
self.plugin_version = self.find_closest_historical_version(self.lpdb.version, versions) | Version handler for plugins.
@param versions: list of versions to check.
@return: If plugin allows loading an outdated version (plugin_config['allow_loading_outdated'] is True), it returns closest
historical version out of versions passed in the list. If not it should return the exact version matching the lpdb version.
If no suitable version is found returns None. | localpdb/plugins/Plugin.py | set_version | labstructbioinf/localpdb | 28 | python | def set_version(self, versions):
"\n Version handler for plugins.\n @param versions: list of versions to check.\n @return: If plugin allows loading an outdated version (plugin_config['allow_loading_outdated'] is True), it returns closest\n historical version out of versions passed in the list. If not it should return the exact version matching the lpdb version.\n If no suitable version is found returns None.\n "
if (not self.plugin_config['allow_loading_outdated']):
self.plugin_version = self.lpdb.version
else:
self.plugin_version = self.find_closest_historical_version(self.lpdb.version, versions) | def set_version(self, versions):
"\n Version handler for plugins.\n @param versions: list of versions to check.\n @return: If plugin allows loading an outdated version (plugin_config['allow_loading_outdated'] is True), it returns closest\n historical version out of versions passed in the list. If not it should return the exact version matching the lpdb version.\n If no suitable version is found returns None.\n "
if (not self.plugin_config['allow_loading_outdated']):
self.plugin_version = self.lpdb.version
else:
self.plugin_version = self.find_closest_historical_version(self.lpdb.version, versions)<|docstring|>Version handler for plugins.
@param versions: list of versions to check.
@return: If plugin allows loading an outdated version (plugin_config['allow_loading_outdated'] is True), it returns closest
historical version out of versions passed in the list. If not it should return the exact version matching the lpdb version.
If no suitable version is found returns None.<|endoftext|> |
c31be5a330262018676a45858021855bd1d443888479d3e8020173f872c972db | @staticmethod
def find_closest_historical_version(version, versions):
'\n Finds closest historical version in list of versions.\n @param version: specified version.\n @param versions: list of versions.\n @return: closest historical version.\n '
diffs = {(ver - version): ver for ver in versions if ((ver - version) <= 0)}
return (diffs[max(diffs, key=(lambda key: diffs[key]))] if (len(diffs) > 0) else None) | Finds closest historical version in list of versions.
@param version: specified version.
@param versions: list of versions.
@return: closest historical version. | localpdb/plugins/Plugin.py | find_closest_historical_version | labstructbioinf/localpdb | 28 | python | @staticmethod
def find_closest_historical_version(version, versions):
'\n Finds closest historical version in list of versions.\n @param version: specified version.\n @param versions: list of versions.\n @return: closest historical version.\n '
diffs = {(ver - version): ver for ver in versions if ((ver - version) <= 0)}
return (diffs[max(diffs, key=(lambda key: diffs[key]))] if (len(diffs) > 0) else None) | @staticmethod
def find_closest_historical_version(version, versions):
'\n Finds closest historical version in list of versions.\n @param version: specified version.\n @param versions: list of versions.\n @return: closest historical version.\n '
diffs = {(ver - version): ver for ver in versions if ((ver - version) <= 0)}
return (diffs[max(diffs, key=(lambda key: diffs[key]))] if (len(diffs) > 0) else None)<|docstring|>Finds closest historical version in list of versions.
@param version: specified version.
@param versions: list of versions.
@return: closest historical version.<|endoftext|> |
fd848561a32b2cf44b398e612f501eb5899c930c4fe92f255d9e2edcb16bd6f5 | def extractGeneratedIdl(output_dir, zap_config_path):
'Find a file Clusters.matter in the output directory and\n place it along with the input zap file.\n\n Intent is to make the "zap content" more humanly understandable.\n '
idl_path = os.path.join(output_dir, 'Clusters.matter')
if (not os.path.exists(idl_path)):
return
target_path = zap_config_path.replace('.zap', '.matter')
if (not target_path.endswith('.matter')):
raise Error(('Unexpected input zap file %s' % self.zap_config))
os.rename(idl_path, target_path) | Find a file Clusters.matter in the output directory and
place it along with the input zap file.
Intent is to make the "zap content" more humanly understandable. | scripts/tools/zap/generate.py | extractGeneratedIdl | minhlez/connectedhomeip | 4 | python | def extractGeneratedIdl(output_dir, zap_config_path):
'Find a file Clusters.matter in the output directory and\n place it along with the input zap file.\n\n Intent is to make the "zap content" more humanly understandable.\n '
idl_path = os.path.join(output_dir, 'Clusters.matter')
if (not os.path.exists(idl_path)):
return
target_path = zap_config_path.replace('.zap', '.matter')
if (not target_path.endswith('.matter')):
raise Error(('Unexpected input zap file %s' % self.zap_config))
os.rename(idl_path, target_path) | def extractGeneratedIdl(output_dir, zap_config_path):
'Find a file Clusters.matter in the output directory and\n place it along with the input zap file.\n\n Intent is to make the "zap content" more humanly understandable.\n '
idl_path = os.path.join(output_dir, 'Clusters.matter')
if (not os.path.exists(idl_path)):
return
target_path = zap_config_path.replace('.zap', '.matter')
if (not target_path.endswith('.matter')):
raise Error(('Unexpected input zap file %s' % self.zap_config))
os.rename(idl_path, target_path)<|docstring|>Find a file Clusters.matter in the output directory and
place it along with the input zap file.
Intent is to make the "zap content" more humanly understandable.<|endoftext|> |
037c768a8b1097c33fb0125c509596bc11f14b69e1758eb664cc4587dbacf2d2 | def proba_density(self, x, y, alpha):
'\n computes p(x | y, alpha)\n '
gamma_k = self.gamma_k
gamma_loc = self.gamma_loc
gamma_scale = alpha
normal_mean = (self.normal_mean * alpha)
normal_sigma = (self.normal_sigma * alpha)
proba_gamma = sts.gamma.pdf(x, gamma_k, loc=gamma_loc, scale=gamma_scale)
proba_normal = sts.norm.pdf(x, loc=normal_mean, scale=normal_sigma)
proba_density = ((y * proba_normal) + ((1 - y) * proba_gamma))
return proba_density | computes p(x | y, alpha) | explore/minitoy_systematics.py | proba_density | victor-estrade/SystGradDescent | 2 | python | def proba_density(self, x, y, alpha):
'\n \n '
gamma_k = self.gamma_k
gamma_loc = self.gamma_loc
gamma_scale = alpha
normal_mean = (self.normal_mean * alpha)
normal_sigma = (self.normal_sigma * alpha)
proba_gamma = sts.gamma.pdf(x, gamma_k, loc=gamma_loc, scale=gamma_scale)
proba_normal = sts.norm.pdf(x, loc=normal_mean, scale=normal_sigma)
proba_density = ((y * proba_normal) + ((1 - y) * proba_gamma))
return proba_density | def proba_density(self, x, y, alpha):
'\n \n '
gamma_k = self.gamma_k
gamma_loc = self.gamma_loc
gamma_scale = alpha
normal_mean = (self.normal_mean * alpha)
normal_sigma = (self.normal_sigma * alpha)
proba_gamma = sts.gamma.pdf(x, gamma_k, loc=gamma_loc, scale=gamma_scale)
proba_normal = sts.norm.pdf(x, loc=normal_mean, scale=normal_sigma)
proba_density = ((y * proba_normal) + ((1 - y) * proba_gamma))
return proba_density<|docstring|>computes p(x | y, alpha)<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.