repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
textX/textX | textx/scoping/tools.py | get_referenced_object_as_list | def get_referenced_object_as_list(
prev_obj, obj, dot_separated_name, desired_type=None):
"""
Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list
"""
res = get_referenced_object(prev_obj, obj, dot_separated_name,
desired_type)
if res is None:
return []
elif type(res) is list:
return res
else:
return [res] | python | def get_referenced_object_as_list(
prev_obj, obj, dot_separated_name, desired_type=None):
"""
Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list
"""
res = get_referenced_object(prev_obj, obj, dot_separated_name,
desired_type)
if res is None:
return []
elif type(res) is list:
return res
else:
return [res] | [
"def",
"get_referenced_object_as_list",
"(",
"prev_obj",
",",
"obj",
",",
"dot_separated_name",
",",
"desired_type",
"=",
"None",
")",
":",
"res",
"=",
"get_referenced_object",
"(",
"prev_obj",
",",
"obj",
",",
"dot_separated_name",
",",
"desired_type",
")",
"if",... | Same as get_referenced_object, but always returns a list.
Args:
prev_obj: see get_referenced_object
obj: see get_referenced_object
dot_separated_name: see get_referenced_object
desired_type: see get_referenced_object
Returns:
same as get_referenced_object, but always returns a list | [
"Same",
"as",
"get_referenced_object",
"but",
"always",
"returns",
"a",
"list",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/scoping/tools.py#L199-L220 | train | 199,600 |
textX/textX | textx/scoping/__init__.py | GlobalModelRepository.load_model | def load_model(
self, the_metamodel, filename, is_main_model, encoding='utf-8',
add_to_local_models=True):
"""
load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model
"""
if not self.local_models.has_model(filename):
if self.all_models.has_model(filename):
new_model = self.all_models.filename_to_model[filename]
else:
# print("LOADING {}".format(filename))
# all models loaded here get their references resolved from the
# root model
new_model = the_metamodel.internal_model_from_file(
filename, pre_ref_resolution_callback=lambda
other_model: self.pre_ref_resolution_callback(other_model),
is_main_model=is_main_model, encoding=encoding)
self.all_models.filename_to_model[filename] = new_model
# print("ADDING {}".format(filename))
if add_to_local_models:
self.local_models.filename_to_model[filename] = new_model
assert self.all_models.has_model(filename) # to be sure...
return self.all_models.filename_to_model[filename] | python | def load_model(
self, the_metamodel, filename, is_main_model, encoding='utf-8',
add_to_local_models=True):
"""
load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model
"""
if not self.local_models.has_model(filename):
if self.all_models.has_model(filename):
new_model = self.all_models.filename_to_model[filename]
else:
# print("LOADING {}".format(filename))
# all models loaded here get their references resolved from the
# root model
new_model = the_metamodel.internal_model_from_file(
filename, pre_ref_resolution_callback=lambda
other_model: self.pre_ref_resolution_callback(other_model),
is_main_model=is_main_model, encoding=encoding)
self.all_models.filename_to_model[filename] = new_model
# print("ADDING {}".format(filename))
if add_to_local_models:
self.local_models.filename_to_model[filename] = new_model
assert self.all_models.has_model(filename) # to be sure...
return self.all_models.filename_to_model[filename] | [
"def",
"load_model",
"(",
"self",
",",
"the_metamodel",
",",
"filename",
",",
"is_main_model",
",",
"encoding",
"=",
"'utf-8'",
",",
"add_to_local_models",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"local_models",
".",
"has_model",
"(",
"filename",
")... | load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model | [
"load",
"a",
"single",
"model"
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/scoping/__init__.py#L197-L227 | train | 199,601 |
textX/textX | textx/cli.py | check | def check(ctx, meta_model_file, model_file, ignore_case):
"""
Check validity of meta-model and optionally model.
"""
debug = ctx.obj['debug']
check_model(meta_model_file, model_file, debug, ignore_case) | python | def check(ctx, meta_model_file, model_file, ignore_case):
"""
Check validity of meta-model and optionally model.
"""
debug = ctx.obj['debug']
check_model(meta_model_file, model_file, debug, ignore_case) | [
"def",
"check",
"(",
"ctx",
",",
"meta_model_file",
",",
"model_file",
",",
"ignore_case",
")",
":",
"debug",
"=",
"ctx",
".",
"obj",
"[",
"'debug'",
"]",
"check_model",
"(",
"meta_model_file",
",",
"model_file",
",",
"debug",
",",
"ignore_case",
")"
] | Check validity of meta-model and optionally model. | [
"Check",
"validity",
"of",
"meta",
"-",
"model",
"and",
"optionally",
"model",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/cli.py#L34-L39 | train | 199,602 |
textX/textX | docs/tutorials/entity/srcgen.py | get_entity_mm | def get_entity_mm():
"""
Builds and returns a meta-model for Entity language.
"""
type_builtins = {
'integer': SimpleType(None, 'integer'),
'string': SimpleType(None, 'string')
}
entity_mm = metamodel_from_file(join(this_folder, 'entity.tx'),
classes=[SimpleType],
builtins=type_builtins)
return entity_mm | python | def get_entity_mm():
"""
Builds and returns a meta-model for Entity language.
"""
type_builtins = {
'integer': SimpleType(None, 'integer'),
'string': SimpleType(None, 'string')
}
entity_mm = metamodel_from_file(join(this_folder, 'entity.tx'),
classes=[SimpleType],
builtins=type_builtins)
return entity_mm | [
"def",
"get_entity_mm",
"(",
")",
":",
"type_builtins",
"=",
"{",
"'integer'",
":",
"SimpleType",
"(",
"None",
",",
"'integer'",
")",
",",
"'string'",
":",
"SimpleType",
"(",
"None",
",",
"'string'",
")",
"}",
"entity_mm",
"=",
"metamodel_from_file",
"(",
... | Builds and returns a meta-model for Entity language. | [
"Builds",
"and",
"returns",
"a",
"meta",
"-",
"model",
"for",
"Entity",
"language",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/docs/tutorials/entity/srcgen.py#L18-L30 | train | 199,603 |
textX/textX | examples/StateMachine/smdot.py | sm_to_dot | def sm_to_dot(model):
"""
Transforms given state machine model to dot str.
"""
dot_str = HEADER
# Render states
first = True
for state in model.states:
dot_str += '{}[label="{{{}{}|{}}}"]\n'.format(
id(state), r"-\> " if first else "", state.name,
"\\n".join(action.name for action in state.actions))
first = False
# Render transitions
for transition in state.transitions:
dot_str += '{} -> {} [label="{}"]\n'\
.format(id(state), id(transition.to_state),
transition.event.name)
# If there are reset events declared render them.
if model.resetEvents:
dot_str += 'reset_events [label="{{Reset Events|{}}}", style=""]\n'\
.format("\\n".join(event.name for event in model.resetEvents))
dot_str += '\n}\n'
return dot_str | python | def sm_to_dot(model):
"""
Transforms given state machine model to dot str.
"""
dot_str = HEADER
# Render states
first = True
for state in model.states:
dot_str += '{}[label="{{{}{}|{}}}"]\n'.format(
id(state), r"-\> " if first else "", state.name,
"\\n".join(action.name for action in state.actions))
first = False
# Render transitions
for transition in state.transitions:
dot_str += '{} -> {} [label="{}"]\n'\
.format(id(state), id(transition.to_state),
transition.event.name)
# If there are reset events declared render them.
if model.resetEvents:
dot_str += 'reset_events [label="{{Reset Events|{}}}", style=""]\n'\
.format("\\n".join(event.name for event in model.resetEvents))
dot_str += '\n}\n'
return dot_str | [
"def",
"sm_to_dot",
"(",
"model",
")",
":",
"dot_str",
"=",
"HEADER",
"# Render states",
"first",
"=",
"True",
"for",
"state",
"in",
"model",
".",
"states",
":",
"dot_str",
"+=",
"'{}[label=\"{{{}{}|{}}}\"]\\n'",
".",
"format",
"(",
"id",
"(",
"state",
")",
... | Transforms given state machine model to dot str. | [
"Transforms",
"given",
"state",
"machine",
"model",
"to",
"dot",
"str",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/examples/StateMachine/smdot.py#L24-L51 | train | 199,604 |
textX/textX | textx/langapi.py | get_language | def get_language(language_name):
"""
Returns a callable that instantiates meta-model for the given language.
"""
langs = list(pkg_resources.iter_entry_points(group=LANG_EP,
name=language_name))
if not langs:
raise TextXError('Language "{}" is not registered.'
.format(language_name))
if len(langs) > 1:
# Multiple languages defined with the same name
raise TextXError('Language "{}" registered multiple times:\n{}'
.format(language_name,
"\n".join([l.dist for l in langs])))
return langs[0].load()() | python | def get_language(language_name):
"""
Returns a callable that instantiates meta-model for the given language.
"""
langs = list(pkg_resources.iter_entry_points(group=LANG_EP,
name=language_name))
if not langs:
raise TextXError('Language "{}" is not registered.'
.format(language_name))
if len(langs) > 1:
# Multiple languages defined with the same name
raise TextXError('Language "{}" registered multiple times:\n{}'
.format(language_name,
"\n".join([l.dist for l in langs])))
return langs[0].load()() | [
"def",
"get_language",
"(",
"language_name",
")",
":",
"langs",
"=",
"list",
"(",
"pkg_resources",
".",
"iter_entry_points",
"(",
"group",
"=",
"LANG_EP",
",",
"name",
"=",
"language_name",
")",
")",
"if",
"not",
"langs",
":",
"raise",
"TextXError",
"(",
"... | Returns a callable that instantiates meta-model for the given language. | [
"Returns",
"a",
"callable",
"that",
"instantiates",
"meta",
"-",
"model",
"for",
"the",
"given",
"language",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/langapi.py#L21-L39 | train | 199,605 |
textX/textX | textx/model.py | get_model | def get_model(obj):
"""
Finds model root element for the given object.
"""
p = obj
while hasattr(p, 'parent'):
p = p.parent
return p | python | def get_model(obj):
"""
Finds model root element for the given object.
"""
p = obj
while hasattr(p, 'parent'):
p = p.parent
return p | [
"def",
"get_model",
"(",
"obj",
")",
":",
"p",
"=",
"obj",
"while",
"hasattr",
"(",
"p",
",",
"'parent'",
")",
":",
"p",
"=",
"p",
".",
"parent",
"return",
"p"
] | Finds model root element for the given object. | [
"Finds",
"model",
"root",
"element",
"for",
"the",
"given",
"object",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/model.py#L32-L39 | train | 199,606 |
textX/textX | textx/model.py | get_parent_of_type | def get_parent_of_type(typ, obj):
"""
Finds first object up the parent chain of the given type.
If no parent of the given type exists None is returned.
Args:
typ(str or python class): The type of the model object we are
looking for.
obj (model object): Python model object which is the start of the
search process.
"""
if type(typ) is not text:
typ = typ.__name__
while hasattr(obj, 'parent'):
obj = obj.parent
if obj.__class__.__name__ == typ:
return obj | python | def get_parent_of_type(typ, obj):
"""
Finds first object up the parent chain of the given type.
If no parent of the given type exists None is returned.
Args:
typ(str or python class): The type of the model object we are
looking for.
obj (model object): Python model object which is the start of the
search process.
"""
if type(typ) is not text:
typ = typ.__name__
while hasattr(obj, 'parent'):
obj = obj.parent
if obj.__class__.__name__ == typ:
return obj | [
"def",
"get_parent_of_type",
"(",
"typ",
",",
"obj",
")",
":",
"if",
"type",
"(",
"typ",
")",
"is",
"not",
"text",
":",
"typ",
"=",
"typ",
".",
"__name__",
"while",
"hasattr",
"(",
"obj",
",",
"'parent'",
")",
":",
"obj",
"=",
"obj",
".",
"parent",... | Finds first object up the parent chain of the given type.
If no parent of the given type exists None is returned.
Args:
typ(str or python class): The type of the model object we are
looking for.
obj (model object): Python model object which is the start of the
search process. | [
"Finds",
"first",
"object",
"up",
"the",
"parent",
"chain",
"of",
"the",
"given",
"type",
".",
"If",
"no",
"parent",
"of",
"the",
"given",
"type",
"exists",
"None",
"is",
"returned",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/model.py#L49-L66 | train | 199,607 |
textX/textX | textx/model.py | get_model_parser | def get_model_parser(top_rule, comments_model, **kwargs):
"""
Creates model parser for the given language.
"""
class TextXModelParser(Parser):
"""
Parser created from textual textX language description.
Semantic actions for this parser will construct object
graph representing model on the given language.
"""
def __init__(self, *args, **kwargs):
super(TextXModelParser, self).__init__(*args, **kwargs)
# By default first rule is starting rule
# and must be followed by the EOF
self.parser_model = Sequence(
nodes=[top_rule, EOF()], rule_name='Model', root=True)
self.comments_model = comments_model
# Stack for metaclass instances
self._inst_stack = []
# Dict for cross-ref resolving
# { id(class): { obj.name: obj}}
self._instances = {}
# List to keep track of all cross-ref that need to be resolved
# Contained elements are tuples: (instance, metaattr, cross-ref)
self._crossrefs = []
def clone(self):
"""
Responsibility: create a clone in order to parse a separate file.
It must be possible that more than one clone exist in parallel,
without being influenced by other parser clones.
Returns:
A clone of this parser
"""
import copy
the_clone = copy.copy(self) # shallow copy
# create new objects for parse-dependent data
the_clone._inst_stack = []
the_clone._instances = {}
the_clone._crossrefs = []
# TODO self.memoization = memoization
the_clone.comments = []
the_clone.comment_positions = {}
the_clone.sem_actions = {}
return the_clone
def _parse(self):
try:
return self.parser_model.parse(self)
except NoMatch as e:
line, col = e.parser.pos_to_linecol(e.position)
raise TextXSyntaxError(message=text(e),
line=line,
col=col,
expected_rules=e.rules)
def get_model_from_file(self, file_name, encoding, debug,
pre_ref_resolution_callback=None,
is_main_model=True):
"""
Creates model from the parse tree from the previous parse call.
If file_name is given file will be parsed before model
construction.
"""
with codecs.open(file_name, 'r', encoding) as f:
model_str = f.read()
model = self.get_model_from_str(
model_str, file_name=file_name, debug=debug,
pre_ref_resolution_callback=pre_ref_resolution_callback,
is_main_model=is_main_model, encoding=encoding)
return model
def get_model_from_str(self, model_str, file_name=None, debug=None,
pre_ref_resolution_callback=None,
is_main_model=True, encoding='utf-8'):
"""
Parses given string and creates model object graph.
"""
old_debug_state = self.debug
try:
if debug is not None:
self.debug = debug
if self.debug:
self.dprint("*** PARSING MODEL ***")
self.parse(model_str, file_name=file_name)
# Transform parse tree to model. Skip root node which
# represents the whole file ending in EOF.
model = parse_tree_to_objgraph(
self, self.parse_tree[0], file_name=file_name,
pre_ref_resolution_callback=pre_ref_resolution_callback,
is_main_model=is_main_model, encoding=encoding)
finally:
if debug is not None:
self.debug = old_debug_state
try:
model._tx_metamodel = self.metamodel
except AttributeError:
# model is some primitive python type (e.g. str)
pass
return model
return TextXModelParser(**kwargs) | python | def get_model_parser(top_rule, comments_model, **kwargs):
"""
Creates model parser for the given language.
"""
class TextXModelParser(Parser):
"""
Parser created from textual textX language description.
Semantic actions for this parser will construct object
graph representing model on the given language.
"""
def __init__(self, *args, **kwargs):
super(TextXModelParser, self).__init__(*args, **kwargs)
# By default first rule is starting rule
# and must be followed by the EOF
self.parser_model = Sequence(
nodes=[top_rule, EOF()], rule_name='Model', root=True)
self.comments_model = comments_model
# Stack for metaclass instances
self._inst_stack = []
# Dict for cross-ref resolving
# { id(class): { obj.name: obj}}
self._instances = {}
# List to keep track of all cross-ref that need to be resolved
# Contained elements are tuples: (instance, metaattr, cross-ref)
self._crossrefs = []
def clone(self):
"""
Responsibility: create a clone in order to parse a separate file.
It must be possible that more than one clone exist in parallel,
without being influenced by other parser clones.
Returns:
A clone of this parser
"""
import copy
the_clone = copy.copy(self) # shallow copy
# create new objects for parse-dependent data
the_clone._inst_stack = []
the_clone._instances = {}
the_clone._crossrefs = []
# TODO self.memoization = memoization
the_clone.comments = []
the_clone.comment_positions = {}
the_clone.sem_actions = {}
return the_clone
def _parse(self):
try:
return self.parser_model.parse(self)
except NoMatch as e:
line, col = e.parser.pos_to_linecol(e.position)
raise TextXSyntaxError(message=text(e),
line=line,
col=col,
expected_rules=e.rules)
def get_model_from_file(self, file_name, encoding, debug,
pre_ref_resolution_callback=None,
is_main_model=True):
"""
Creates model from the parse tree from the previous parse call.
If file_name is given file will be parsed before model
construction.
"""
with codecs.open(file_name, 'r', encoding) as f:
model_str = f.read()
model = self.get_model_from_str(
model_str, file_name=file_name, debug=debug,
pre_ref_resolution_callback=pre_ref_resolution_callback,
is_main_model=is_main_model, encoding=encoding)
return model
def get_model_from_str(self, model_str, file_name=None, debug=None,
pre_ref_resolution_callback=None,
is_main_model=True, encoding='utf-8'):
"""
Parses given string and creates model object graph.
"""
old_debug_state = self.debug
try:
if debug is not None:
self.debug = debug
if self.debug:
self.dprint("*** PARSING MODEL ***")
self.parse(model_str, file_name=file_name)
# Transform parse tree to model. Skip root node which
# represents the whole file ending in EOF.
model = parse_tree_to_objgraph(
self, self.parse_tree[0], file_name=file_name,
pre_ref_resolution_callback=pre_ref_resolution_callback,
is_main_model=is_main_model, encoding=encoding)
finally:
if debug is not None:
self.debug = old_debug_state
try:
model._tx_metamodel = self.metamodel
except AttributeError:
# model is some primitive python type (e.g. str)
pass
return model
return TextXModelParser(**kwargs) | [
"def",
"get_model_parser",
"(",
"top_rule",
",",
"comments_model",
",",
"*",
"*",
"kwargs",
")",
":",
"class",
"TextXModelParser",
"(",
"Parser",
")",
":",
"\"\"\"\n Parser created from textual textX language description.\n Semantic actions for this parser will cons... | Creates model parser for the given language. | [
"Creates",
"model",
"parser",
"for",
"the",
"given",
"language",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/model.py#L167-L284 | train | 199,608 |
textX/textX | textx/model.py | ReferenceResolver.resolve_one_step | def resolve_one_step(self):
"""
Resolves model references.
"""
metamodel = self.parser.metamodel
current_crossrefs = self.parser._crossrefs
# print("DEBUG: Current crossrefs #: {}".
# format(len(current_crossrefs)))
new_crossrefs = []
self.delayed_crossrefs = []
resolved_crossref_count = 0
# -------------------------
# start of resolve-loop
# -------------------------
default_scope = DefaultScopeProvider()
for obj, attr, crossref in current_crossrefs:
if (get_model(obj) == self.model):
attr_value = getattr(obj, attr.name)
attr_refs = [obj.__class__.__name__ + "." + attr.name,
"*." + attr.name, obj.__class__.__name__ + ".*",
"*.*"]
for attr_ref in attr_refs:
if attr_ref in metamodel.scope_providers:
if self.parser.debug:
self.parser.dprint(" FOUND {}".format(attr_ref))
resolved = metamodel.scope_providers[attr_ref](
obj, attr, crossref)
break
else:
resolved = default_scope(obj, attr, crossref)
# Collect cross-references for textx-tools
if resolved and not type(resolved) is Postponed:
if metamodel.textx_tools_support:
self.pos_crossref_list.append(
RefRulePosition(
name=crossref.obj_name,
ref_pos_start=crossref.position,
ref_pos_end=crossref.position + len(
resolved.name),
def_pos_start=resolved._tx_position,
def_pos_end=resolved._tx_position_end))
if not resolved:
# As a fall-back search builtins if given
if metamodel.builtins:
if crossref.obj_name in metamodel.builtins:
# TODO: Classes must match
resolved = metamodel.builtins[crossref.obj_name]
if not resolved:
line, col = self.parser.pos_to_linecol(crossref.position)
raise TextXSemanticError(
message='Unknown object "{}" of class "{}"'.format(
crossref.obj_name, crossref.cls.__name__),
line=line, col=col, err_type=UNKNOWN_OBJ_ERROR,
expected_obj_cls=crossref.cls,
filename=self.model._tx_filename)
if type(resolved) is Postponed:
self.delayed_crossrefs.append((obj, attr, crossref))
new_crossrefs.append((obj, attr, crossref))
else:
resolved_crossref_count += 1
if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]:
attr_value.append(resolved)
else:
setattr(obj, attr.name, resolved)
else: # crossref not in model
new_crossrefs.append((obj, attr, crossref))
# -------------------------
# end of resolve-loop
# -------------------------
# store cross-refs from other models in the parser list (for later
# processing)
self.parser._crossrefs = new_crossrefs
# print("DEBUG: Next crossrefs #: {}".format(len(new_crossrefs)))
return (resolved_crossref_count, self.delayed_crossrefs) | python | def resolve_one_step(self):
"""
Resolves model references.
"""
metamodel = self.parser.metamodel
current_crossrefs = self.parser._crossrefs
# print("DEBUG: Current crossrefs #: {}".
# format(len(current_crossrefs)))
new_crossrefs = []
self.delayed_crossrefs = []
resolved_crossref_count = 0
# -------------------------
# start of resolve-loop
# -------------------------
default_scope = DefaultScopeProvider()
for obj, attr, crossref in current_crossrefs:
if (get_model(obj) == self.model):
attr_value = getattr(obj, attr.name)
attr_refs = [obj.__class__.__name__ + "." + attr.name,
"*." + attr.name, obj.__class__.__name__ + ".*",
"*.*"]
for attr_ref in attr_refs:
if attr_ref in metamodel.scope_providers:
if self.parser.debug:
self.parser.dprint(" FOUND {}".format(attr_ref))
resolved = metamodel.scope_providers[attr_ref](
obj, attr, crossref)
break
else:
resolved = default_scope(obj, attr, crossref)
# Collect cross-references for textx-tools
if resolved and not type(resolved) is Postponed:
if metamodel.textx_tools_support:
self.pos_crossref_list.append(
RefRulePosition(
name=crossref.obj_name,
ref_pos_start=crossref.position,
ref_pos_end=crossref.position + len(
resolved.name),
def_pos_start=resolved._tx_position,
def_pos_end=resolved._tx_position_end))
if not resolved:
# As a fall-back search builtins if given
if metamodel.builtins:
if crossref.obj_name in metamodel.builtins:
# TODO: Classes must match
resolved = metamodel.builtins[crossref.obj_name]
if not resolved:
line, col = self.parser.pos_to_linecol(crossref.position)
raise TextXSemanticError(
message='Unknown object "{}" of class "{}"'.format(
crossref.obj_name, crossref.cls.__name__),
line=line, col=col, err_type=UNKNOWN_OBJ_ERROR,
expected_obj_cls=crossref.cls,
filename=self.model._tx_filename)
if type(resolved) is Postponed:
self.delayed_crossrefs.append((obj, attr, crossref))
new_crossrefs.append((obj, attr, crossref))
else:
resolved_crossref_count += 1
if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]:
attr_value.append(resolved)
else:
setattr(obj, attr.name, resolved)
else: # crossref not in model
new_crossrefs.append((obj, attr, crossref))
# -------------------------
# end of resolve-loop
# -------------------------
# store cross-refs from other models in the parser list (for later
# processing)
self.parser._crossrefs = new_crossrefs
# print("DEBUG: Next crossrefs #: {}".format(len(new_crossrefs)))
return (resolved_crossref_count, self.delayed_crossrefs) | [
"def",
"resolve_one_step",
"(",
"self",
")",
":",
"metamodel",
"=",
"self",
".",
"parser",
".",
"metamodel",
"current_crossrefs",
"=",
"self",
".",
"parser",
".",
"_crossrefs",
"# print(\"DEBUG: Current crossrefs #: {}\".",
"# format(len(current_crossrefs)))",
"new_c... | Resolves model references. | [
"Resolves",
"model",
"references",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/model.py#L726-L805 | train | 199,609 |
textX/textX | textx/lang.py | python_type | def python_type(textx_type_name):
"""Return Python type from the name of base textx type."""
return {
'ID': text,
'BOOL': bool,
'INT': int,
'FLOAT': float,
'STRICTFLOAT': float,
'STRING': text,
'NUMBER': float,
'BASETYPE': text,
}.get(textx_type_name, textx_type_name) | python | def python_type(textx_type_name):
"""Return Python type from the name of base textx type."""
return {
'ID': text,
'BOOL': bool,
'INT': int,
'FLOAT': float,
'STRICTFLOAT': float,
'STRING': text,
'NUMBER': float,
'BASETYPE': text,
}.get(textx_type_name, textx_type_name) | [
"def",
"python_type",
"(",
"textx_type_name",
")",
":",
"return",
"{",
"'ID'",
":",
"text",
",",
"'BOOL'",
":",
"bool",
",",
"'INT'",
":",
"int",
",",
"'FLOAT'",
":",
"float",
",",
"'STRICTFLOAT'",
":",
"float",
",",
"'STRING'",
":",
"text",
",",
"'NUM... | Return Python type from the name of base textx type. | [
"Return",
"Python",
"type",
"from",
"the",
"name",
"of",
"base",
"textx",
"type",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/lang.py#L116-L127 | train | 199,610 |
textX/textX | textx/lang.py | language_from_str | def language_from_str(language_def, metamodel):
"""
Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language.
"""
if type(language_def) is not text:
raise TextXError("textX accepts only unicode strings.")
if metamodel.debug:
metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")
# Check the cache for already conctructed textX parser
if metamodel.debug in textX_parsers:
parser = textX_parsers[metamodel.debug]
else:
# Create parser for TextX grammars using
# the arpeggio grammar specified in this module
parser = ParserPython(textx_model, comment_def=comment,
ignore_case=False,
reduce_tree=False,
memoization=metamodel.memoization,
debug=metamodel.debug,
file=metamodel.file)
# Cache it for subsequent calls
textX_parsers[metamodel.debug] = parser
# Parse language description with textX parser
try:
parse_tree = parser.parse(language_def)
except NoMatch as e:
line, col = parser.pos_to_linecol(e.position)
raise TextXSyntaxError(text(e), line, col)
# Construct new parser and meta-model based on the given language
# description.
lang_parser = visit_parse_tree(parse_tree,
TextXVisitor(parser, metamodel))
# Meta-model is constructed. Validate its semantics.
metamodel.validate()
# Here we connect meta-model and language parser for convenience.
lang_parser.metamodel = metamodel
metamodel._parser_blueprint = lang_parser
if metamodel.debug:
# Create dot file for debuging purposes
PMDOTExporter().exportFile(
lang_parser.parser_model,
"{}_parser_model.dot".format(metamodel.rootcls.__name__))
return lang_parser | python | def language_from_str(language_def, metamodel):
"""
Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language.
"""
if type(language_def) is not text:
raise TextXError("textX accepts only unicode strings.")
if metamodel.debug:
metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")
# Check the cache for already conctructed textX parser
if metamodel.debug in textX_parsers:
parser = textX_parsers[metamodel.debug]
else:
# Create parser for TextX grammars using
# the arpeggio grammar specified in this module
parser = ParserPython(textx_model, comment_def=comment,
ignore_case=False,
reduce_tree=False,
memoization=metamodel.memoization,
debug=metamodel.debug,
file=metamodel.file)
# Cache it for subsequent calls
textX_parsers[metamodel.debug] = parser
# Parse language description with textX parser
try:
parse_tree = parser.parse(language_def)
except NoMatch as e:
line, col = parser.pos_to_linecol(e.position)
raise TextXSyntaxError(text(e), line, col)
# Construct new parser and meta-model based on the given language
# description.
lang_parser = visit_parse_tree(parse_tree,
TextXVisitor(parser, metamodel))
# Meta-model is constructed. Validate its semantics.
metamodel.validate()
# Here we connect meta-model and language parser for convenience.
lang_parser.metamodel = metamodel
metamodel._parser_blueprint = lang_parser
if metamodel.debug:
# Create dot file for debuging purposes
PMDOTExporter().exportFile(
lang_parser.parser_model,
"{}_parser_model.dot".format(metamodel.rootcls.__name__))
return lang_parser | [
"def",
"language_from_str",
"(",
"language_def",
",",
"metamodel",
")",
":",
"if",
"type",
"(",
"language_def",
")",
"is",
"not",
"text",
":",
"raise",
"TextXError",
"(",
"\"textX accepts only unicode strings.\"",
")",
"if",
"metamodel",
".",
"debug",
":",
"meta... | Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language. | [
"Constructs",
"parser",
"and",
"initializes",
"metamodel",
"from",
"language",
"description",
"given",
"in",
"textX",
"language",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/lang.py#L885-L945 | train | 199,611 |
textX/textX | textx/lang.py | TextXVisitor.second_textx_model | def second_textx_model(self, model_parser):
"""Cross reference resolving for parser model."""
if self.grammar_parser.debug:
self.grammar_parser.dprint("RESOLVING MODEL PARSER: second_pass")
self._resolve_rule_refs(self.grammar_parser, model_parser)
self._determine_rule_types(model_parser.metamodel)
self._resolve_cls_refs(self.grammar_parser, model_parser)
return model_parser | python | def second_textx_model(self, model_parser):
"""Cross reference resolving for parser model."""
if self.grammar_parser.debug:
self.grammar_parser.dprint("RESOLVING MODEL PARSER: second_pass")
self._resolve_rule_refs(self.grammar_parser, model_parser)
self._determine_rule_types(model_parser.metamodel)
self._resolve_cls_refs(self.grammar_parser, model_parser)
return model_parser | [
"def",
"second_textx_model",
"(",
"self",
",",
"model_parser",
")",
":",
"if",
"self",
".",
"grammar_parser",
".",
"debug",
":",
"self",
".",
"grammar_parser",
".",
"dprint",
"(",
"\"RESOLVING MODEL PARSER: second_pass\"",
")",
"self",
".",
"_resolve_rule_refs",
"... | Cross reference resolving for parser model. | [
"Cross",
"reference",
"resolving",
"for",
"parser",
"model",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/lang.py#L211-L221 | train | 199,612 |
textX/textX | textx/lang.py | TextXVisitor._resolve_rule_refs | def _resolve_rule_refs(self, grammar_parser, model_parser):
"""Resolves parser ParsingExpression crossrefs."""
def _resolve_rule(rule):
"""
Recursively resolve peg rule references.
Args:
rule(ParsingExpression or RuleCrossRef)
"""
if not isinstance(rule, RuleCrossRef) and rule in resolved_rules:
return rule
resolved_rules.add(rule)
if grammar_parser.debug:
grammar_parser.dprint("Resolving rule: {}".format(rule))
if type(rule) is RuleCrossRef:
rule_name = rule.rule_name
suppress = rule.suppress
if rule_name in model_parser.metamodel:
rule = model_parser.metamodel[rule_name]._tx_peg_rule
if type(rule) is RuleCrossRef:
rule = _resolve_rule(rule)
model_parser.metamodel[rule_name]._tx_peg_rule = rule
if suppress:
# Special case. Suppression on rule reference.
_tx_class = rule._tx_class
rule = Sequence(nodes=[rule],
rule_name=rule_name,
suppress=suppress)
rule._tx_class = _tx_class
else:
line, col = grammar_parser.pos_to_linecol(rule.position)
raise TextXSemanticError(
'Unexisting rule "{}" at position {}.'
.format(rule.rule_name,
(line, col)), line, col)
assert isinstance(rule, ParsingExpression),\
"{}:{}".format(type(rule), text(rule))
# Recurse into subrules, and resolve rules.
for idx, child in enumerate(rule.nodes):
if child not in resolved_rules:
child = _resolve_rule(child)
rule.nodes[idx] = child
return rule
# Two pass resolving
for i in range(2):
if grammar_parser.debug:
grammar_parser.dprint("RESOLVING RULE CROSS-REFS - PASS {}"
.format(i + 1))
resolved_rules = set()
_resolve_rule(model_parser.parser_model)
# Resolve rules of all meta-classes to handle unreferenced
# rules also.
for cls in model_parser.metamodel:
cls._tx_peg_rule = _resolve_rule(cls._tx_peg_rule) | python | def _resolve_rule_refs(self, grammar_parser, model_parser):
"""Resolves parser ParsingExpression crossrefs."""
def _resolve_rule(rule):
"""
Recursively resolve peg rule references.
Args:
rule(ParsingExpression or RuleCrossRef)
"""
if not isinstance(rule, RuleCrossRef) and rule in resolved_rules:
return rule
resolved_rules.add(rule)
if grammar_parser.debug:
grammar_parser.dprint("Resolving rule: {}".format(rule))
if type(rule) is RuleCrossRef:
rule_name = rule.rule_name
suppress = rule.suppress
if rule_name in model_parser.metamodel:
rule = model_parser.metamodel[rule_name]._tx_peg_rule
if type(rule) is RuleCrossRef:
rule = _resolve_rule(rule)
model_parser.metamodel[rule_name]._tx_peg_rule = rule
if suppress:
# Special case. Suppression on rule reference.
_tx_class = rule._tx_class
rule = Sequence(nodes=[rule],
rule_name=rule_name,
suppress=suppress)
rule._tx_class = _tx_class
else:
line, col = grammar_parser.pos_to_linecol(rule.position)
raise TextXSemanticError(
'Unexisting rule "{}" at position {}.'
.format(rule.rule_name,
(line, col)), line, col)
assert isinstance(rule, ParsingExpression),\
"{}:{}".format(type(rule), text(rule))
# Recurse into subrules, and resolve rules.
for idx, child in enumerate(rule.nodes):
if child not in resolved_rules:
child = _resolve_rule(child)
rule.nodes[idx] = child
return rule
# Two pass resolving
for i in range(2):
if grammar_parser.debug:
grammar_parser.dprint("RESOLVING RULE CROSS-REFS - PASS {}"
.format(i + 1))
resolved_rules = set()
_resolve_rule(model_parser.parser_model)
# Resolve rules of all meta-classes to handle unreferenced
# rules also.
for cls in model_parser.metamodel:
cls._tx_peg_rule = _resolve_rule(cls._tx_peg_rule) | [
"def",
"_resolve_rule_refs",
"(",
"self",
",",
"grammar_parser",
",",
"model_parser",
")",
":",
"def",
"_resolve_rule",
"(",
"rule",
")",
":",
"\"\"\"\n Recursively resolve peg rule references.\n\n Args:\n rule(ParsingExpression or RuleCrossRef)\n... | Resolves parser ParsingExpression crossrefs. | [
"Resolves",
"parser",
"ParsingExpression",
"crossrefs",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/lang.py#L223-L285 | train | 199,613 |
textX/textX | textx/export.py | match_abstract_str | def match_abstract_str(cls):
"""
For a given abstract or match rule meta-class returns a nice string
representation for the body.
"""
def r(s):
if s.root:
if s in visited or s.rule_name in ALL_TYPE_NAMES or \
(hasattr(s, '_tx_class') and
s._tx_class._tx_type is not RULE_MATCH):
return s.rule_name
visited.add(s)
if isinstance(s, Match):
result = text(s)
elif isinstance(s, OrderedChoice):
result = "|".join([r(x) for x in s.nodes])
elif isinstance(s, Sequence):
result = " ".join([r(x) for x in s.nodes])
elif isinstance(s, ZeroOrMore):
result = "({})*".format(r(s.nodes[0]))
elif isinstance(s, OneOrMore):
result = "({})+".format(r(s.nodes[0]))
elif isinstance(s, Optional):
result = "{}?".format(r(s.nodes[0]))
elif isinstance(s, SyntaxPredicate):
result = ""
return "{}{}".format(result, "-" if s.suppress else "")
mstr = ""
if cls.__name__ not in ALL_TYPE_NAMES and \
not (cls._tx_type is RULE_ABSTRACT and
cls.__name__ != cls._tx_peg_rule.rule_name):
e = cls._tx_peg_rule
visited = set()
if not isinstance(e, Match):
visited.add(e)
if isinstance(e, OrderedChoice):
mstr = "|".join([r(x) for x in e.nodes
if x.rule_name in BASE_TYPE_NAMES or not x.root])
elif isinstance(e, Sequence):
mstr = " ".join([r(x) for x in e.nodes])
else:
mstr = r(e)
mstr = dot_escape(mstr)
return mstr | python | def match_abstract_str(cls):
"""
For a given abstract or match rule meta-class returns a nice string
representation for the body.
"""
def r(s):
if s.root:
if s in visited or s.rule_name in ALL_TYPE_NAMES or \
(hasattr(s, '_tx_class') and
s._tx_class._tx_type is not RULE_MATCH):
return s.rule_name
visited.add(s)
if isinstance(s, Match):
result = text(s)
elif isinstance(s, OrderedChoice):
result = "|".join([r(x) for x in s.nodes])
elif isinstance(s, Sequence):
result = " ".join([r(x) for x in s.nodes])
elif isinstance(s, ZeroOrMore):
result = "({})*".format(r(s.nodes[0]))
elif isinstance(s, OneOrMore):
result = "({})+".format(r(s.nodes[0]))
elif isinstance(s, Optional):
result = "{}?".format(r(s.nodes[0]))
elif isinstance(s, SyntaxPredicate):
result = ""
return "{}{}".format(result, "-" if s.suppress else "")
mstr = ""
if cls.__name__ not in ALL_TYPE_NAMES and \
not (cls._tx_type is RULE_ABSTRACT and
cls.__name__ != cls._tx_peg_rule.rule_name):
e = cls._tx_peg_rule
visited = set()
if not isinstance(e, Match):
visited.add(e)
if isinstance(e, OrderedChoice):
mstr = "|".join([r(x) for x in e.nodes
if x.rule_name in BASE_TYPE_NAMES or not x.root])
elif isinstance(e, Sequence):
mstr = " ".join([r(x) for x in e.nodes])
else:
mstr = r(e)
mstr = dot_escape(mstr)
return mstr | [
"def",
"match_abstract_str",
"(",
"cls",
")",
":",
"def",
"r",
"(",
"s",
")",
":",
"if",
"s",
".",
"root",
":",
"if",
"s",
"in",
"visited",
"or",
"s",
".",
"rule_name",
"in",
"ALL_TYPE_NAMES",
"or",
"(",
"hasattr",
"(",
"s",
",",
"'_tx_class'",
")"... | For a given abstract or match rule meta-class returns a nice string
representation for the body. | [
"For",
"a",
"given",
"abstract",
"or",
"match",
"rule",
"meta",
"-",
"class",
"returns",
"a",
"nice",
"string",
"representation",
"for",
"the",
"body",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/export.py#L40-L87 | train | 199,614 |
textX/textX | textx/metamodel.py | metamodel_from_str | def metamodel_from_str(lang_desc, metamodel=None, **kwargs):
"""
Creates a new metamodel from the textX description given as a string.
Args:
lang_desc(str): A textX language description.
metamodel(TextXMetaModel): A metamodel that should be used.
other params: See TextXMetaModel.
"""
if not metamodel:
metamodel = TextXMetaModel(**kwargs)
language_from_str(lang_desc, metamodel)
return metamodel | python | def metamodel_from_str(lang_desc, metamodel=None, **kwargs):
"""
Creates a new metamodel from the textX description given as a string.
Args:
lang_desc(str): A textX language description.
metamodel(TextXMetaModel): A metamodel that should be used.
other params: See TextXMetaModel.
"""
if not metamodel:
metamodel = TextXMetaModel(**kwargs)
language_from_str(lang_desc, metamodel)
return metamodel | [
"def",
"metamodel_from_str",
"(",
"lang_desc",
",",
"metamodel",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"metamodel",
":",
"metamodel",
"=",
"TextXMetaModel",
"(",
"*",
"*",
"kwargs",
")",
"language_from_str",
"(",
"lang_desc",
",",
"m... | Creates a new metamodel from the textX description given as a string.
Args:
lang_desc(str): A textX language description.
metamodel(TextXMetaModel): A metamodel that should be used.
other params: See TextXMetaModel. | [
"Creates",
"a",
"new",
"metamodel",
"from",
"the",
"textX",
"description",
"given",
"as",
"a",
"string",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L600-L616 | train | 199,615 |
textX/textX | textx/metamodel.py | metamodel_from_file | def metamodel_from_file(file_name, **kwargs):
"""
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel | python | def metamodel_from_file(file_name, **kwargs):
"""
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel | [
"def",
"metamodel_from_file",
"(",
"file_name",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"file_name",
",",
"'r'",
",",
"'utf-8'",
")",
"as",
"f",
":",
"lang_desc",
"=",
"f",
".",
"read",
"(",
")",
"metamodel",
"=",
"meta... | Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str. | [
"Creates",
"new",
"metamodel",
"from",
"the",
"given",
"file",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L619-L634 | train | 199,616 |
textX/textX | textx/metamodel.py | TextXMetaModel._init_class | def _init_class(self, cls, peg_rule, position, position_end=None,
inherits=None, root=False, rule_type=RULE_MATCH):
"""
Setup meta-class special attributes, namespaces etc. This is called
both for textX created classes as well as user classes.
"""
cls._tx_metamodel = self
# Attribute information (MetaAttr instances) keyed by name.
cls._tx_attrs = OrderedDict()
# A list of inheriting classes
cls._tx_inh_by = inherits if inherits else []
cls._tx_position = position
cls._tx_position_end = \
position if position_end is None else position_end
# The type of the rule this meta-class results from.
# There are three rule types: common, abstract and match
# Base types are match rules.
cls._tx_type = rule_type
cls._tx_peg_rule = peg_rule
if peg_rule:
peg_rule._tx_class = cls
# Push this class and PEG rule in the current namespace
current_namespace = self.namespaces[self._namespace_stack[-1]]
cls._tx_fqn = self._cls_fqn(cls)
current_namespace[cls.__name__] = cls
if root:
self.rootcls = cls | python | def _init_class(self, cls, peg_rule, position, position_end=None,
inherits=None, root=False, rule_type=RULE_MATCH):
"""
Setup meta-class special attributes, namespaces etc. This is called
both for textX created classes as well as user classes.
"""
cls._tx_metamodel = self
# Attribute information (MetaAttr instances) keyed by name.
cls._tx_attrs = OrderedDict()
# A list of inheriting classes
cls._tx_inh_by = inherits if inherits else []
cls._tx_position = position
cls._tx_position_end = \
position if position_end is None else position_end
# The type of the rule this meta-class results from.
# There are three rule types: common, abstract and match
# Base types are match rules.
cls._tx_type = rule_type
cls._tx_peg_rule = peg_rule
if peg_rule:
peg_rule._tx_class = cls
# Push this class and PEG rule in the current namespace
current_namespace = self.namespaces[self._namespace_stack[-1]]
cls._tx_fqn = self._cls_fqn(cls)
current_namespace[cls.__name__] = cls
if root:
self.rootcls = cls | [
"def",
"_init_class",
"(",
"self",
",",
"cls",
",",
"peg_rule",
",",
"position",
",",
"position_end",
"=",
"None",
",",
"inherits",
"=",
"None",
",",
"root",
"=",
"False",
",",
"rule_type",
"=",
"RULE_MATCH",
")",
":",
"cls",
".",
"_tx_metamodel",
"=",
... | Setup meta-class special attributes, namespaces etc. This is called
both for textX created classes as well as user classes. | [
"Setup",
"meta",
"-",
"class",
"special",
"attributes",
"namespaces",
"etc",
".",
"This",
"is",
"called",
"both",
"for",
"textX",
"created",
"classes",
"as",
"well",
"as",
"user",
"classes",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L342-L376 | train | 199,617 |
textX/textX | textx/metamodel.py | TextXMetaModel._cls_fqn | def _cls_fqn(self, cls):
"""
Returns fully qualified name for the class based on current namespace
and the class name.
"""
ns = self._namespace_stack[-1]
if ns in ['__base__', None]:
return cls.__name__
else:
return ns + '.' + cls.__name__ | python | def _cls_fqn(self, cls):
"""
Returns fully qualified name for the class based on current namespace
and the class name.
"""
ns = self._namespace_stack[-1]
if ns in ['__base__', None]:
return cls.__name__
else:
return ns + '.' + cls.__name__ | [
"def",
"_cls_fqn",
"(",
"self",
",",
"cls",
")",
":",
"ns",
"=",
"self",
".",
"_namespace_stack",
"[",
"-",
"1",
"]",
"if",
"ns",
"in",
"[",
"'__base__'",
",",
"None",
"]",
":",
"return",
"cls",
".",
"__name__",
"else",
":",
"return",
"ns",
"+",
... | Returns fully qualified name for the class based on current namespace
and the class name. | [
"Returns",
"fully",
"qualified",
"name",
"for",
"the",
"class",
"based",
"on",
"current",
"namespace",
"and",
"the",
"class",
"name",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L378-L387 | train | 199,618 |
textX/textX | textx/metamodel.py | TextXMetaModel._new_cls_attr | def _new_cls_attr(self, clazz, name, cls=None, mult=MULT_ONE, cont=True,
ref=False, bool_assignment=False, position=0):
"""Creates new meta attribute of this class."""
attr = MetaAttr(name, cls, mult, cont, ref, bool_assignment,
position)
clazz._tx_attrs[name] = attr
return attr | python | def _new_cls_attr(self, clazz, name, cls=None, mult=MULT_ONE, cont=True,
ref=False, bool_assignment=False, position=0):
"""Creates new meta attribute of this class."""
attr = MetaAttr(name, cls, mult, cont, ref, bool_assignment,
position)
clazz._tx_attrs[name] = attr
return attr | [
"def",
"_new_cls_attr",
"(",
"self",
",",
"clazz",
",",
"name",
",",
"cls",
"=",
"None",
",",
"mult",
"=",
"MULT_ONE",
",",
"cont",
"=",
"True",
",",
"ref",
"=",
"False",
",",
"bool_assignment",
"=",
"False",
",",
"position",
"=",
"0",
")",
":",
"a... | Creates new meta attribute of this class. | [
"Creates",
"new",
"meta",
"attribute",
"of",
"this",
"class",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L428-L434 | train | 199,619 |
textX/textX | textx/metamodel.py | TextXMetaModel.convert | def convert(self, value, _type):
"""
Convert instances of textx types and match rules to python types.
"""
return self.type_convertors.get(_type, lambda x: x)(value) | python | def convert(self, value, _type):
"""
Convert instances of textx types and match rules to python types.
"""
return self.type_convertors.get(_type, lambda x: x)(value) | [
"def",
"convert",
"(",
"self",
",",
"value",
",",
"_type",
")",
":",
"return",
"self",
".",
"type_convertors",
".",
"get",
"(",
"_type",
",",
"lambda",
"x",
":",
"x",
")",
"(",
"value",
")"
] | Convert instances of textx types and match rules to python types. | [
"Convert",
"instances",
"of",
"textx",
"types",
"and",
"match",
"rules",
"to",
"python",
"types",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L436-L440 | train | 199,620 |
textX/textX | textx/metamodel.py | TextXMetaModel.register_obj_processors | def register_obj_processors(self, obj_processors):
"""
Object processors are callables that will be called after
each successful model object construction.
Those callables receive model object as its parameter.
Registration of new object processors will replace previous.
Args:
obj_processors(dict): A dictionary where key=class name,
value=callable
"""
self.obj_processors = obj_processors
self.type_convertors.update(obj_processors) | python | def register_obj_processors(self, obj_processors):
"""
Object processors are callables that will be called after
each successful model object construction.
Those callables receive model object as its parameter.
Registration of new object processors will replace previous.
Args:
obj_processors(dict): A dictionary where key=class name,
value=callable
"""
self.obj_processors = obj_processors
self.type_convertors.update(obj_processors) | [
"def",
"register_obj_processors",
"(",
"self",
",",
"obj_processors",
")",
":",
"self",
".",
"obj_processors",
"=",
"obj_processors",
"self",
".",
"type_convertors",
".",
"update",
"(",
"obj_processors",
")"
] | Object processors are callables that will be called after
each successful model object construction.
Those callables receive model object as its parameter.
Registration of new object processors will replace previous.
Args:
obj_processors(dict): A dictionary where key=class name,
value=callable | [
"Object",
"processors",
"are",
"callables",
"that",
"will",
"be",
"called",
"after",
"each",
"successful",
"model",
"object",
"construction",
".",
"Those",
"callables",
"receive",
"model",
"object",
"as",
"its",
"parameter",
".",
"Registration",
"of",
"new",
"ob... | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L585-L597 | train | 199,621 |
textX/textX | examples/StateMachine/state_machine.py | StateMachine.interpret | def interpret(self):
"""
Main interpreter loop.
"""
self.print_menu()
while True:
try:
event = input()
if event == 'q':
return
event = int(event)
event = self.model.events[event-1]
except Exception:
print('Invalid input')
self.event(event)
self.print_menu() | python | def interpret(self):
"""
Main interpreter loop.
"""
self.print_menu()
while True:
try:
event = input()
if event == 'q':
return
event = int(event)
event = self.model.events[event-1]
except Exception:
print('Invalid input')
self.event(event)
self.print_menu() | [
"def",
"interpret",
"(",
"self",
")",
":",
"self",
".",
"print_menu",
"(",
")",
"while",
"True",
":",
"try",
":",
"event",
"=",
"input",
"(",
")",
"if",
"event",
"==",
"'q'",
":",
"return",
"event",
"=",
"int",
"(",
"event",
")",
"event",
"=",
"s... | Main interpreter loop. | [
"Main",
"interpreter",
"loop",
"."
] | 5796ac38116ad86584392dbecdbf923ede746361 | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/examples/StateMachine/state_machine.py#L46-L62 | train | 199,622 |
deeplook/svglib | svglib/svglib.py | find_font | def find_font(font_name):
"""Return the font and a Boolean indicating if the match is exact."""
if font_name in STANDARD_FONT_NAMES:
return font_name, True
elif font_name in _registered_fonts:
return font_name, _registered_fonts[font_name]
NOT_FOUND = (None, False)
try:
# Try first to register the font if it exists as ttf,
# based on ReportLab font search.
registerFont(TTFont(font_name, '%s.ttf' % font_name))
_registered_fonts[font_name] = True
return font_name, True
except TTFError:
# Try searching with Fontconfig
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', font_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = pipe.communicate()[0].decode(sys.getfilesystemencoding())
font_path = output.split('\n')[0]
except OSError:
return NOT_FOUND
try:
registerFont(TTFont(font_name, font_path))
except TTFError:
return NOT_FOUND
# Fontconfig may return a default font totally unrelated with font_name
exact = font_name.lower() in os.path.basename(font_path).lower()
_registered_fonts[font_name] = exact
return font_name, exact | python | def find_font(font_name):
"""Return the font and a Boolean indicating if the match is exact."""
if font_name in STANDARD_FONT_NAMES:
return font_name, True
elif font_name in _registered_fonts:
return font_name, _registered_fonts[font_name]
NOT_FOUND = (None, False)
try:
# Try first to register the font if it exists as ttf,
# based on ReportLab font search.
registerFont(TTFont(font_name, '%s.ttf' % font_name))
_registered_fonts[font_name] = True
return font_name, True
except TTFError:
# Try searching with Fontconfig
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', font_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = pipe.communicate()[0].decode(sys.getfilesystemencoding())
font_path = output.split('\n')[0]
except OSError:
return NOT_FOUND
try:
registerFont(TTFont(font_name, font_path))
except TTFError:
return NOT_FOUND
# Fontconfig may return a default font totally unrelated with font_name
exact = font_name.lower() in os.path.basename(font_path).lower()
_registered_fonts[font_name] = exact
return font_name, exact | [
"def",
"find_font",
"(",
"font_name",
")",
":",
"if",
"font_name",
"in",
"STANDARD_FONT_NAMES",
":",
"return",
"font_name",
",",
"True",
"elif",
"font_name",
"in",
"_registered_fonts",
":",
"return",
"font_name",
",",
"_registered_fonts",
"[",
"font_name",
"]",
... | Return the font and a Boolean indicating if the match is exact. | [
"Return",
"the",
"font",
"and",
"a",
"Boolean",
"indicating",
"if",
"the",
"match",
"is",
"exact",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L79-L112 | train | 199,623 |
deeplook/svglib | svglib/svglib.py | svg2rlg | def svg2rlg(path, **kwargs):
"Convert an SVG file to an RLG Drawing object."
# unzip .svgz file into .svg
unzipped = False
if isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz":
with gzip.open(path, 'rb') as f_in, open(path[:-1], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
path = path[:-1]
unzipped = True
svg_root = load_svg_file(path)
if svg_root is None:
return
# convert to a RLG drawing
svgRenderer = SvgRenderer(path, **kwargs)
drawing = svgRenderer.render(svg_root)
# remove unzipped .svgz file (.svg)
if unzipped:
os.remove(path)
return drawing | python | def svg2rlg(path, **kwargs):
"Convert an SVG file to an RLG Drawing object."
# unzip .svgz file into .svg
unzipped = False
if isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz":
with gzip.open(path, 'rb') as f_in, open(path[:-1], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
path = path[:-1]
unzipped = True
svg_root = load_svg_file(path)
if svg_root is None:
return
# convert to a RLG drawing
svgRenderer = SvgRenderer(path, **kwargs)
drawing = svgRenderer.render(svg_root)
# remove unzipped .svgz file (.svg)
if unzipped:
os.remove(path)
return drawing | [
"def",
"svg2rlg",
"(",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"# unzip .svgz file into .svg",
"unzipped",
"=",
"False",
"if",
"isinstance",
"(",
"path",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
... | Convert an SVG file to an RLG Drawing object. | [
"Convert",
"an",
"SVG",
"file",
"to",
"an",
"RLG",
"Drawing",
"object",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1324-L1347 | train | 199,624 |
deeplook/svglib | svglib/svglib.py | AttributeConverter.parseMultiAttributes | def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs | python | def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs | [
"def",
"parseMultiAttributes",
"(",
"self",
",",
"line",
")",
":",
"attrs",
"=",
"line",
".",
"split",
"(",
"';'",
")",
"attrs",
"=",
"[",
"a",
".",
"strip",
"(",
")",
"for",
"a",
"in",
"attrs",
"]",
"attrs",
"=",
"filter",
"(",
"lambda",
"a",
":... | Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'. | [
"Try",
"parsing",
"compound",
"attribute",
"string",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L180-L196 | train | 199,625 |
deeplook/svglib | svglib/svglib.py | AttributeConverter.findAttr | def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return '' | python | def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return '' | [
"def",
"findAttr",
"(",
"self",
",",
"svgNode",
",",
"name",
")",
":",
"# This needs also to lookup values like \"url(#SomeName)\"...",
"if",
"self",
".",
"css_rules",
"is",
"not",
"None",
"and",
"not",
"svgNode",
".",
"attrib",
".",
"get",
"(",
"'__rules_applied'... | Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned. | [
"Search",
"an",
"attribute",
"with",
"some",
"name",
"in",
"some",
"node",
"or",
"above",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L198-L223 | train | 199,626 |
deeplook/svglib | svglib/svglib.py | AttributeConverter.getAllAttributes | def getAllAttributes(self, svgNode):
"Return a dictionary of all attributes of svgNode or those inherited by it."
dict = {}
if node_name(svgNode.getparent()) == 'g':
dict.update(self.getAllAttributes(svgNode.getparent()))
style = svgNode.attrib.get("style")
if style:
d = self.parseMultiAttributes(style)
dict.update(d)
for key, value in svgNode.attrib.items():
if key != "style":
dict[key] = value
return dict | python | def getAllAttributes(self, svgNode):
"Return a dictionary of all attributes of svgNode or those inherited by it."
dict = {}
if node_name(svgNode.getparent()) == 'g':
dict.update(self.getAllAttributes(svgNode.getparent()))
style = svgNode.attrib.get("style")
if style:
d = self.parseMultiAttributes(style)
dict.update(d)
for key, value in svgNode.attrib.items():
if key != "style":
dict[key] = value
return dict | [
"def",
"getAllAttributes",
"(",
"self",
",",
"svgNode",
")",
":",
"dict",
"=",
"{",
"}",
"if",
"node_name",
"(",
"svgNode",
".",
"getparent",
"(",
")",
")",
"==",
"'g'",
":",
"dict",
".",
"update",
"(",
"self",
".",
"getAllAttributes",
"(",
"svgNode",
... | Return a dictionary of all attributes of svgNode or those inherited by it. | [
"Return",
"a",
"dictionary",
"of",
"all",
"attributes",
"of",
"svgNode",
"or",
"those",
"inherited",
"by",
"it",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L225-L242 | train | 199,627 |
deeplook/svglib | svglib/svglib.py | AttributeConverter.convertTransform | def convertTransform(self, svgAttr):
"""Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))]
"""
line = svgAttr.strip()
ops = line[:]
brackets = []
indices = []
for i, lin in enumerate(line):
if lin in "()":
brackets.append(i)
for i in range(0, len(brackets), 2):
bi, bj = brackets[i], brackets[i+1]
subline = line[bi+1:bj]
subline = subline.strip()
subline = subline.replace(',', ' ')
subline = re.sub("[ ]+", ',', subline)
try:
if ',' in subline:
indices.append(tuple(float(num) for num in subline.split(',')))
else:
indices.append(float(subline))
except ValueError:
continue
ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
ops = ops.replace(',', ' ').split()
if len(ops) != len(indices):
logger.warning("Unable to parse transform expression '%s'" % svgAttr)
return []
result = []
for i, op in enumerate(ops):
result.append((op, indices[i]))
return result | python | def convertTransform(self, svgAttr):
"""Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))]
"""
line = svgAttr.strip()
ops = line[:]
brackets = []
indices = []
for i, lin in enumerate(line):
if lin in "()":
brackets.append(i)
for i in range(0, len(brackets), 2):
bi, bj = brackets[i], brackets[i+1]
subline = line[bi+1:bj]
subline = subline.strip()
subline = subline.replace(',', ' ')
subline = re.sub("[ ]+", ',', subline)
try:
if ',' in subline:
indices.append(tuple(float(num) for num in subline.split(',')))
else:
indices.append(float(subline))
except ValueError:
continue
ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
ops = ops.replace(',', ' ').split()
if len(ops) != len(indices):
logger.warning("Unable to parse transform expression '%s'" % svgAttr)
return []
result = []
for i, op in enumerate(ops):
result.append((op, indices[i]))
return result | [
"def",
"convertTransform",
"(",
"self",
",",
"svgAttr",
")",
":",
"line",
"=",
"svgAttr",
".",
"strip",
"(",
")",
"ops",
"=",
"line",
"[",
":",
"]",
"brackets",
"=",
"[",
"]",
"indices",
"=",
"[",
"]",
"for",
"i",
",",
"lin",
"in",
"enumerate",
"... | Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))] | [
"Parse",
"transform",
"attribute",
"string",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L248-L287 | train | 199,628 |
def convertLength(self, svgAttr, percentOf=100, em_base=12):
    """Convert an SVG length attribute string to points."""
    text = svgAttr
    if not text:
        return 0.0
    # When a list of values is given, only the first one is honoured.
    if ' ' in text.replace(',', ' ').strip():
        logger.debug("Only getting first value of %s" % text)
        text = text.replace(',', ' ').split()[0]
    if text.endswith('%'):
        logger.debug("Fiddling length unit: %")
        return float(text[:-1]) / 100 * percentOf
    if text.endswith("px"):
        # User units are taken as-is.
        return float(text[:-2])
    if text.endswith("pt"):
        return float(text[:-2]) * 1.25
    if text.endswith("pc"):
        return float(text[:-2]) * pica
    if text.endswith("em"):
        return float(text[:-2]) * em_base
    if "ex" in text:
        logger.warning("Ignoring unit ex")
        text = text.replace("ex", '')
    # Delegate the remaining units (mm, cm, in, ...) to toLength().
    return toLength(text.strip())
"Convert length to points."
text = svgAttr
if not text:
return 0.0
if ' ' in text.replace(',', ' ').strip():
logger.debug("Only getting first value of %s" % text)
text = text.replace(',', ' ').split()[0]
if text.endswith('%'):
logger.debug("Fiddling length unit: %")
return float(text[:-1]) / 100 * percentOf
elif text.endswith("pc"):
return float(text[:-2]) * pica
elif text.endswith("pt"):
return float(text[:-2]) * 1.25
elif text.endswith("em"):
return float(text[:-2]) * em_base
elif text.endswith("px"):
return float(text[:-2])
if "ex" in text:
logger.warning("Ignoring unit ex")
text = text.replace("ex", '')
text = text.strip()
length = toLength(text) # this does the default measurements such as mm and cm
return length | [
"def",
"convertLength",
"(",
"self",
",",
"svgAttr",
",",
"percentOf",
"=",
"100",
",",
"em_base",
"=",
"12",
")",
":",
"text",
"=",
"svgAttr",
"if",
"not",
"text",
":",
"return",
"0.0",
"if",
"' '",
"in",
"text",
".",
"replace",
"(",
"','",
",",
"... | Convert length to points. | [
"Convert",
"length",
"to",
"points",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L305-L334 | train | 199,629 |
def convertLengthList(self, svgAttr):
    """Convert an SVG attribute holding a list of lengths to numeric values."""
    return list(map(self.convertLength, self.split_attr_list(svgAttr)))
"""Convert a list of lengths."""
return [self.convertLength(a) for a in self.split_attr_list(svgAttr)] | [
"def",
"convertLengthList",
"(",
"self",
",",
"svgAttr",
")",
":",
"return",
"[",
"self",
".",
"convertLength",
"(",
"a",
")",
"for",
"a",
"in",
"self",
".",
"split_attr_list",
"(",
"svgAttr",
")",
"]"
] | Convert a list of lengths. | [
"Convert",
"a",
"list",
"of",
"lengths",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L336-L338 | train | 199,630 |
def convertColor(self, svgAttr):
    """Convert an SVG color string to a ReportLab color object (or None)."""
    # fix it: most likely all "web colors" are allowed
    predefined = (
        "aqua black blue fuchsia gray green lime maroon navy "
        "olive orange purple red silver teal white yellow "
        "lawngreen indianred aquamarine lightgreen brown"
    )
    # This needs also to lookup values like "url(#SomeName)"...
    text = svgAttr
    if not text or text == "none":
        return None
    if text in predefined.split():
        return self.color_converter(getattr(colors, text))
    if text == "currentColor":
        return "currentColor"
    if len(text) == 7 and text[0] == '#':
        return self.color_converter(colors.HexColor(text))
    if len(text) == 4 and text[0] == '#':
        # Expand the short #rgb form to #rrggbb.
        expanded = '#' + ''.join(2 * ch for ch in text[1:])
        return self.color_converter(colors.HexColor(expanded))
    if text.startswith('rgb') and '%' not in text:
        # Integer components: render each as a two-digit hex byte.
        nums = text[3:].strip('()').split(',')
        digits = [hex(int(num))[2:].zfill(2) for num in nums]
        return self.color_converter(colors.HexColor("#%s%s%s" % tuple(digits)))
    if text.startswith('rgb') and '%' in text:
        # Percentage components: map 0-100% onto 0.0-1.0.
        parts = text[3:].replace('%', '').strip('()').split(',')
        fractions = [float(val) / 100.0 for val in parts]
        return self.color_converter(colors.Color(*fractions))
    logger.warning("Can't handle color: %s" % text)
    return None
"Convert string to a RL color object."
# fix it: most likely all "web colors" are allowed
predefined = "aqua black blue fuchsia gray green lime maroon navy "
predefined = predefined + "olive orange purple red silver teal white yellow "
predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
# This needs also to lookup values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
if text in predefined.split():
return self.color_converter(getattr(colors, text))
elif text == "currentColor":
return "currentColor"
elif len(text) == 7 and text[0] == '#':
return self.color_converter(colors.HexColor(text))
elif len(text) == 4 and text[0] == '#':
return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3]))
elif text.startswith('rgb') and '%' not in text:
t = text[3:].strip('()')
tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
tup = [(2 - len(h)) * '0' + h for h in tup]
col = "#%s%s%s" % tuple(tup)
return self.color_converter(colors.HexColor(col))
elif text.startswith('rgb') and '%' in text:
t = text[3:].replace('%', '').strip('()')
tup = (float(val)/100.0 for val in t.split(','))
return self.color_converter(colors.Color(*tup))
logger.warning("Can't handle color: %s" % text)
return None | [
"def",
"convertColor",
"(",
"self",
",",
"svgAttr",
")",
":",
"# fix it: most likely all \"web colors\" are allowed",
"predefined",
"=",
"\"aqua black blue fuchsia gray green lime maroon navy \"",
"predefined",
"=",
"predefined",
"+",
"\"olive orange purple red silver teal white yell... | Convert string to a RL color object. | [
"Convert",
"string",
"to",
"a",
"RL",
"color",
"object",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L349-L384 | train | 199,631 |
deeplook/svglib | svglib/svglib.py | SvgRenderer.get_clippath | def get_clippath(self, node):
"""
Return the clipping Path object referenced by the node 'clip-path'
attribute, if any.
"""
# NOTE(review): indentation was flattened in this extract; the nesting of
# the statements below (in particular which block the final 'return path'
# belongs to) should be verified against the upstream source.
def get_path_from_node(node):
# Look for a <path> child of the referenced definition element.
for child in node.getchildren():
if node_name(child) == 'path':
# Convert the SVG path; the resulting group's last item is the Path shape.
group = self.shape_converter.convertShape('path', NodeTracker(child))
return group.contents[-1]
else:
# NOTE(review): this recurses into the *first* non-path child and
# returns immediately, so later siblings are never inspected --
# presumably fine for single-child clipPath defs; confirm intent.
return get_path_from_node(child)
clip_path = node.getAttribute('clip-path')
if clip_path:
# Only 'url(#id)' style references are supported.
m = re.match(r'url\(#([^\)]*)\)', clip_path)
if m:
ref = m.groups()[0]
if ref in self.definitions:
path = get_path_from_node(self.definitions[ref])
if path:
# Wrap in a ClippingPath, presumably so it acts as a clip region
# rather than a drawn shape -- TODO confirm against ClippingPath docs.
path = ClippingPath(copy_from=path)
return path | python | def get_clippath(self, node):
"""
Return the clipping Path object referenced by the node 'clip-path'
attribute, if any.
"""
def get_path_from_node(node):
for child in node.getchildren():
if node_name(child) == 'path':
group = self.shape_converter.convertShape('path', NodeTracker(child))
return group.contents[-1]
else:
return get_path_from_node(child)
clip_path = node.getAttribute('clip-path')
if clip_path:
m = re.match(r'url\(#([^\)]*)\)', clip_path)
if m:
ref = m.groups()[0]
if ref in self.definitions:
path = get_path_from_node(self.definitions[ref])
if path:
path = ClippingPath(copy_from=path)
return path | [
"def",
"get_clippath",
"(",
"self",
",",
"node",
")",
":",
"def",
"get_path_from_node",
"(",
"node",
")",
":",
"for",
"child",
"in",
"node",
".",
"getchildren",
"(",
")",
":",
"if",
"node_name",
"(",
"child",
")",
"==",
"'path'",
":",
"group",
"=",
"... | Return the clipping Path object referenced by the node 'clip-path'
attribute, if any. | [
"Return",
"the",
"clipping",
"Path",
"object",
"referenced",
"by",
"the",
"node",
"clip",
"-",
"path",
"attribute",
"if",
"any",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L626-L648 | train | 199,632 |
def applyTransformOnGroup(self, transform, group):
    """
    Apply an SVG 'transform' attribute value to a ReportLab Group shape.

    ``transform`` is the raw attribute string, e.g.
    "scale(1, -1) translate(10, 30)".  Per the SVG spec,
    rotate(<angle> <cx> <cy>) is equivalent to:
    translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
    """
    for op, values in self.attrConverter.convertTransform(transform):
        if op == "scale":
            # A single scale factor applies uniformly to both axes.
            factors = values if isinstance(values, tuple) else (values, values)
            group.scale(*factors)
        elif op == "translate":
            if isinstance(values, (int, float)):
                # From the SVG spec: if <ty> is not provided, it is zero.
                values = values, 0
            group.translate(*values)
        elif op == "rotate":
            if not isinstance(values, tuple) or len(values) == 1:
                group.rotate(values)
            elif len(values) == 3:
                # Rotation about a point: translate, rotate, translate back.
                angle, cx, cy = values
                group.translate(cx, cy)
                group.rotate(angle)
                group.translate(-cx, -cy)
        elif op == "skewX":
            group.skew(values, 0)
        elif op == "skewY":
            group.skew(0, values)
        elif op == "matrix":
            group.transform = values
        else:
            logger.debug("Ignoring transform: %s %s" % (op, values))
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values)) | [
"def",
"applyTransformOnGroup",
"(",
"self",
",",
"transform",
",",
"group",
")",
":",
"tr",
"=",
"self",
".",
"attrConverter",
".",
"convertTransform",
"(",
"transform",
")",
"for",
"op",
",",
"values",
"in",
"tr",
":",
"if",
"op",
"==",
"\"scale\"",
":... | Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>) | [
"Apply",
"an",
"SVG",
"transformation",
"to",
"a",
"RL",
"Group",
"shape",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1231-L1267 | train | 199,633 |
deeplook/svglib | svglib/svglib.py | Svg2RlgShapeConverter.applyStyleOnShape | def applyStyleOnShape(self, shape, node, only_explicit=False):
"""
Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied.
"""
# NOTE(review): indentation was flattened in this extract; the nesting
# below is inferred and should be verified against the upstream source.
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "black"),
("fill-opacity", "fillOpacity", "convertOpacity", 1),
("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "1"),
("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
# Font attributes only make sense on String shapes (see filter below).
mappingF = (
("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
if shape.__class__ == Group:
# Recursively apply style on Group subelements
for subshape in shape.contents:
self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
return
ac = self.attrConverter
for mapping in (mappingN, mappingF):
# Skip font attributes for non-text shapes.
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
svgAttrValue = ac.findAttr(node, svgAttrName)
if svgAttrValue == '':
if only_explicit:
continue
else:
svgAttrValue = default
if svgAttrValue == "currentColor":
# Resolve 'currentColor' from the parent's 'color' attribute.
svgAttrValue = ac.findAttr(node.getparent(), "color") or default
try:
# Convert the SVG value with the named converter method and
# assign it to the corresponding RLG shape attribute.
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except (AttributeError, KeyError, ValueError):
# Conversion failures are deliberately ignored (best effort).
pass
# Propagate fill opacity into the color's alpha channel.
if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
shape.fillColor.alpha = shape.fillOpacity | python | def applyStyleOnShape(self, shape, node, only_explicit=False):
"""
Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied.
"""
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "black"),
("fill-opacity", "fillOpacity", "convertOpacity", 1),
("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "1"),
("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
mappingF = (
("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
if shape.__class__ == Group:
# Recursively apply style on Group subelements
for subshape in shape.contents:
self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
return
ac = self.attrConverter
for mapping in (mappingN, mappingF):
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
svgAttrValue = ac.findAttr(node, svgAttrName)
if svgAttrValue == '':
if only_explicit:
continue
else:
svgAttrValue = default
if svgAttrValue == "currentColor":
svgAttrValue = ac.findAttr(node.getparent(), "color") or default
try:
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except (AttributeError, KeyError, ValueError):
pass
if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
shape.fillColor.alpha = shape.fillOpacity | [
"def",
"applyStyleOnShape",
"(",
"self",
",",
"shape",
",",
"node",
",",
"only_explicit",
"=",
"False",
")",
":",
"# RLG-specific: all RLG shapes",
"\"Apply style attributes of a sequence of nodes to an RL shape.\"",
"# tuple format: (svgAttr, rlgAttr, converter, default)",
"mappin... | Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied. | [
"Apply",
"styles",
"from",
"an",
"SVG",
"element",
"to",
"an",
"RLG",
"shape",
".",
"If",
"only_explicit",
"is",
"True",
"only",
"attributes",
"really",
"present",
"are",
"applied",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1269-L1321 | train | 199,634 |
def split_floats(op, min_num, value):
    """Split `value`, a list of numbers as a string, to a list of float numbers.

    Every `min_num` floats form one group behind an operator code; after the
    first group, an 'm'/'M' operator is continued as 'l'/'L', matching the SVG
    path grammar (subsequent moveto coordinate pairs are implicit linetos).

    Example: with op='m' and value='10,20 30,40,' the returned value will be
    ['m', [10.0, 20.0], 'l', [30.0, 40.0]]
    """
    # The number pattern requires at least one digit in the mantissa, so a
    # stray '-' or '.' can no longer reach float() and raise ValueError; the
    # exponent sign is optional ('1e5' used to be mis-read as 1.0 and 5.0)
    # and an upper-case exponent or a leading '+' is accepted as well.
    floats = [float(seq) for seq in
              re.findall(r'[-+]?(?:\d+\.?\d*|\.\d+)(?:[eE][+-]?\d+)?', value)]
    res = []
    for i in range(0, len(floats), min_num):
        if i > 0 and op in {'m', 'M'}:
            # Implicit lineto after the initial moveto.
            op = 'l' if op == 'm' else 'L'
        res.extend([op, floats[i:i + min_num]])
    return res
"""Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]]
"""
floats = [float(seq) for seq in re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if seq]
res = []
for i in range(0, len(floats), min_num):
if i > 0 and op in {'m', 'M'}:
op = 'l' if op == 'm' else 'L'
res.extend([op, floats[i:i + min_num]])
return res | [
"def",
"split_floats",
"(",
"op",
",",
"min_num",
",",
"value",
")",
":",
"floats",
"=",
"[",
"float",
"(",
"seq",
")",
"for",
"seq",
"in",
"re",
".",
"findall",
"(",
"r'(-?\\d*\\.?\\d*(?:e[+-]\\d+)?)'",
",",
"value",
")",
"if",
"seq",
"]",
"res",
"=",... | Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]] | [
"Split",
"value",
"a",
"list",
"of",
"numbers",
"as",
"a",
"string",
"to",
"a",
"list",
"of",
"float",
"numbers",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L11-L25 | train | 199,635 |
def normalise_svg_path(attr):
    """Normalise an SVG path attribute string.

    Introduces an operator code before every group of arguments, rewrites
    runs of consecutive M/m operators as MLLL.../mlll..., and gives Z/z an
    empty argument list so the result is uniform to iterate over.

    E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
    -> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
    """
    # Operator codes mapped to the minimum number of expected arguments.
    ops = {
        'A': 7, 'a': 7,
        'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
        'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1,
        'h': 1, 'v': 1, 'C': 6, 'c': 6, 'Z': 0, 'z': 0,
    }
    result = []
    op = None
    for token in re.split('([achlmqstvz])', attr.strip(), flags=re.I):
        if not token.strip():
            continue
        if token in ops:
            # A repeated moveto continues as an (implicit) lineto.
            if token == op and token in ('M', 'm'):
                op = 'l' if token == 'm' else 'L'
            else:
                op = token
            if ops[op] == 0:  # Z/z take no arguments
                result.extend([op, []])
        else:
            # Argument run: split into float groups under the pending op.
            result.extend(split_floats(op, ops[op], token))
            op = result[-2]  # remember the last op actually emitted
    return result
"""Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
"""
# operator codes mapped to the minimum number of expected arguments
ops = {
'A': 7, 'a': 7,
'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1,
'h': 1, 'v': 1, 'C': 6, 'c': 6, 'Z': 0, 'z': 0,
}
op_keys = ops.keys()
# do some preprocessing
result = []
groups = re.split('([achlmqstvz])', attr.strip(), flags=re.I)
op = None
for item in groups:
if item.strip() == '':
continue
if item in op_keys:
# fix sequences of M to one M plus a sequence of L operators,
# same for m and l.
if item == 'M' and item == op:
op = 'L'
elif item == 'm' and item == op:
op = 'l'
else:
op = item
if ops[op] == 0: # Z, z
result.extend([op, []])
else:
result.extend(split_floats(op, ops[op], item))
op = result[-2] # Remember last op
return result | [
"def",
"normalise_svg_path",
"(",
"attr",
")",
":",
"# operator codes mapped to the minimum number of expected arguments",
"ops",
"=",
"{",
"'A'",
":",
"7",
",",
"'a'",
":",
"7",
",",
"'Q'",
":",
"4",
",",
"'q'",
":",
"4",
",",
"'T'",
":",
"2",
",",
"'t'",... | Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []] | [
"Normalise",
"SVG",
"path",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L28-L72 | train | 199,636 |
def convert_quadratic_to_cubic_path(q0, q1, q2):
    """
    Return the cubic Bezier control points equivalent to the quadratic
    Bezier curve through q0, q1, q2 (degree elevation: each inner control
    point sits two thirds of the way from an endpoint towards q1).
    """
    ctl1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]),
            q0[1] + 2. / 3 * (q1[1] - q0[1]))
    ctl2 = (ctl1[0] + 1. / 3 * (q2[0] - q0[0]),
            ctl1[1] + 1. / 3 * (q2[1] - q0[1]))
    return q0, ctl1, ctl2, q2
"""
Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
"""
c0 = q0
c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]), q0[1] + 2. / 3 * (q1[1] - q0[1]))
c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]), c1[1] + 1. / 3 * (q2[1] - q0[1]))
c3 = q2
return c0, c1, c2, c3 | [
"def",
"convert_quadratic_to_cubic_path",
"(",
"q0",
",",
"q1",
",",
"q2",
")",
":",
"c0",
"=",
"q0",
"c1",
"=",
"(",
"q0",
"[",
"0",
"]",
"+",
"2.",
"/",
"3",
"*",
"(",
"q1",
"[",
"0",
"]",
"-",
"q0",
"[",
"0",
"]",
")",
",",
"q0",
"[",
... | Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one. | [
"Convert",
"a",
"quadratic",
"Bezier",
"curve",
"through",
"q0",
"q1",
"q2",
"to",
"a",
"cubic",
"one",
"."
] | 859f9f461f1041018af3e6f507bb4c0616b04fbb | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L75-L83 | train | 199,637 |
def client_start(request, socket, context):
    """
    Register the (request, socket, context) triple in CLIENTS,
    keyed by the socket's session ID.
    """
    session_id = socket.session.session_id
    CLIENTS[session_id] = (request, socket, context)
"""
Adds the client triple to CLIENTS.
"""
CLIENTS[socket.session.session_id] = (request, socket, context) | [
"def",
"client_start",
"(",
"request",
",",
"socket",
",",
"context",
")",
":",
"CLIENTS",
"[",
"socket",
".",
"session",
".",
"session_id",
"]",
"=",
"(",
"request",
",",
"socket",
",",
"context",
")"
] | Adds the client triple to CLIENTS. | [
"Adds",
"the",
"client",
"triple",
"to",
"CLIENTS",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/clients.py#L11-L15 | train | 199,638 |
def client_end(request, socket, context):
    """
    Clean up when a session ends for the given client triple: fire the
    unsubscribe events and the finish event, actually unsubscribe the
    socket from all of its channels, and drop the triple from CLIENTS.
    """
    # Fire the unsubscribe events while the socket is still subscribed, so
    # the finish handler below can still match against its channels.
    for chan in socket.channels:
        events.on_unsubscribe.send(request, socket, context, chan)
    events.on_finish.send(request, socket, context)
    # Now really unsubscribe; iterate over a copy since unsubscribing
    # mutates the channel list.
    for chan in socket.channels[:]:
        socket.unsubscribe(chan)
    # Forget the client entirely.
    del CLIENTS[socket.session.session_id]
"""
Handles cleanup when a session ends for the given client triple.
Sends unsubscribe and finish events, actually unsubscribes from
any channels subscribed to, and removes the client triple from
CLIENTS.
"""
# Send the unsubscribe event prior to actually unsubscribing, so
# that the finish event can still match channels if applicable.
for channel in socket.channels:
events.on_unsubscribe.send(request, socket, context, channel)
events.on_finish.send(request, socket, context)
# Actually unsubscribe to cleanup channel data.
for channel in socket.channels[:]:
socket.unsubscribe(channel)
# Remove the client.
del CLIENTS[socket.session.session_id] | [
"def",
"client_end",
"(",
"request",
",",
"socket",
",",
"context",
")",
":",
"# Send the unsubscribe event prior to actually unsubscribing, so",
"# that the finish event can still match channels if applicable.",
"for",
"channel",
"in",
"socket",
".",
"channels",
":",
"events",... | Handles cleanup when a session ends for the given client triple.
Sends unsubscribe and finish events, actually unsubscribes from
any channels subscribed to, and removes the client triple from
CLIENTS. | [
"Handles",
"cleanup",
"when",
"a",
"session",
"ends",
"for",
"the",
"given",
"client",
"triple",
".",
"Sends",
"unsubscribe",
"and",
"finish",
"events",
"actually",
"unsubscribes",
"from",
"any",
"channels",
"subscribed",
"to",
"and",
"removes",
"the",
"client",... | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/clients.py#L18-L34 | train | 199,639 |
def client_end_all():
    """
    Perform cleanup on all clients - called by runserver_socketio
    when the server is shut down or reloaded.
    """
    # Snapshot with list() instead of slicing: client_end() mutates CLIENTS
    # while we iterate, and dict.values() is a non-sliceable view on
    # Python 3, where ``CLIENTS.values()[:]`` raises TypeError.
    for request, socket, context in list(CLIENTS.values()):
        client_end(request, socket, context)
"""
Performs cleanup on all clients - called by runserver_socketio
when the server is shut down or reloaded.
"""
for request, socket, context in CLIENTS.values()[:]:
client_end(request, socket, context) | [
"def",
"client_end_all",
"(",
")",
":",
"for",
"request",
",",
"socket",
",",
"context",
"in",
"CLIENTS",
".",
"values",
"(",
")",
"[",
":",
"]",
":",
"client_end",
"(",
"request",
",",
"socket",
",",
"context",
")"
] | Performs cleanup on all clients - called by runserver_socketio
when the server is shut down or reloaded. | [
"Performs",
"cleanup",
"on",
"all",
"clients",
"-",
"called",
"by",
"runserver_socketio",
"when",
"the",
"server",
"is",
"shut",
"down",
"or",
"reloaded",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/clients.py#L37-L43 | train | 199,640 |
def subscribe(self, channel):
    """
    Subscribe this socket to ``channel``: record the channel on the socket
    and add the socket's session ID to the channel's subscriber list.
    Return False when already subscribed, True otherwise.
    """
    if channel in self.channels:
        return False
    session_id = self.socket.session.session_id
    CHANNELS[channel].append(session_id)
    self.channels.append(channel)
    return True
"""
Add the channel to this socket's channels, and to the list of
subscribed session IDs for the channel. Return False if
already subscribed, otherwise True.
"""
if channel in self.channels:
return False
CHANNELS[channel].append(self.socket.session.session_id)
self.channels.append(channel)
return True | [
"def",
"subscribe",
"(",
"self",
",",
"channel",
")",
":",
"if",
"channel",
"in",
"self",
".",
"channels",
":",
"return",
"False",
"CHANNELS",
"[",
"channel",
"]",
".",
"append",
"(",
"self",
".",
"socket",
".",
"session",
".",
"session_id",
")",
"self... | Add the channel to this socket's channels, and to the list of
subscribed session IDs for the channel. Return False if
already subscribed, otherwise True. | [
"Add",
"the",
"channel",
"to",
"this",
"socket",
"s",
"channels",
"and",
"to",
"the",
"list",
"of",
"subscribed",
"session",
"IDs",
"for",
"the",
"channel",
".",
"Return",
"False",
"if",
"already",
"subscribed",
"otherwise",
"True",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L22-L32 | train | 199,641 |
def unsubscribe(self, channel):
    """
    Remove ``channel`` from this socket's channels and drop the socket's
    session ID from the channel's subscriber list. Return False when the
    socket wasn't subscribed, True otherwise.
    """
    session_id = self.socket.session.session_id
    try:
        CHANNELS[channel].remove(session_id)
        self.channels.remove(channel)
    except ValueError:
        # remove() found nothing -> not subscribed.
        return False
    return True
"""
Remove the channel from this socket's channels, and from the
list of subscribed session IDs for the channel. Return False
if not subscribed, otherwise True.
"""
try:
CHANNELS[channel].remove(self.socket.session.session_id)
self.channels.remove(channel)
except ValueError:
return False
return True | [
"def",
"unsubscribe",
"(",
"self",
",",
"channel",
")",
":",
"try",
":",
"CHANNELS",
"[",
"channel",
"]",
".",
"remove",
"(",
"self",
".",
"socket",
".",
"session",
".",
"session_id",
")",
"self",
".",
"channels",
".",
"remove",
"(",
"channel",
")",
... | Remove the channel from this socket's channels, and from the
list of subscribed session IDs for the channel. Return False
if not subscribed, otherwise True. | [
"Remove",
"the",
"channel",
"from",
"this",
"socket",
"s",
"channels",
"and",
"from",
"the",
"list",
"of",
"subscribed",
"session",
"IDs",
"for",
"the",
"channel",
".",
"Return",
"False",
"if",
"not",
"subscribed",
"otherwise",
"True",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L34-L45 | train | 199,642 |
def broadcast_channel(self, message, channel=None):
    """
    Write ``message`` to every other subscriber of ``channel``; when no
    channel is given, to the subscribers of all the channels this socket
    is subscribed to.
    """
    targets = self.channels if channel is None else [channel]
    own_id = self.socket.session.session_id
    sessions = self.socket.handler.server.sessions
    for chan in targets:
        for subscriber in CHANNELS[chan]:
            # Don't echo the message back to the sending socket itself.
            if subscriber != own_id:
                self._write(message, sessions[subscriber])
"""
Send the given message to all subscribers for the channel
given. If no channel is given, send to the subscribers for
all the channels that this socket is subscribed to.
"""
if channel is None:
channels = self.channels
else:
channels = [channel]
for channel in channels:
for subscriber in CHANNELS[channel]:
if subscriber != self.socket.session.session_id:
session = self.socket.handler.server.sessions[subscriber]
self._write(message, session) | [
"def",
"broadcast_channel",
"(",
"self",
",",
"message",
",",
"channel",
"=",
"None",
")",
":",
"if",
"channel",
"is",
"None",
":",
"channels",
"=",
"self",
".",
"channels",
"else",
":",
"channels",
"=",
"[",
"channel",
"]",
"for",
"channel",
"in",
"ch... | Send the given message to all subscribers for the channel
given. If no channel is given, send to the subscribers for
all the channels that this socket is subscribed to. | [
"Send",
"the",
"given",
"message",
"to",
"all",
"subscribers",
"for",
"the",
"channel",
"given",
".",
"If",
"no",
"channel",
"is",
"given",
"send",
"to",
"the",
"subscribers",
"for",
"all",
"the",
"channels",
"that",
"this",
"socket",
"is",
"subscribed",
"... | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L47-L61 | train | 199,643 |
stephenmcd/django-socketio | django_socketio/channels.py | SocketIOChannelProxy.send_and_broadcast_channel | def send_and_broadcast_channel(self, message, channel=None):
"""
Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself.
"""
self.send(message)
self.broadcast_channel(message, channel) | python | def send_and_broadcast_channel(self, message, channel=None):
"""
Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself.
"""
self.send(message)
self.broadcast_channel(message, channel) | [
"def",
"send_and_broadcast_channel",
"(",
"self",
",",
"message",
",",
"channel",
"=",
"None",
")",
":",
"self",
".",
"send",
"(",
"message",
")",
"self",
".",
"broadcast_channel",
"(",
"message",
",",
"channel",
")"
] | Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself. | [
"Shortcut",
"for",
"a",
"socket",
"to",
"broadcast",
"to",
"all",
"sockets",
"subscribed",
"to",
"a",
"channel",
"and",
"itself",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L70-L76 | train | 199,644 |
stephenmcd/django-socketio | django_socketio/example_project/chat/events.py | message | def message(request, socket, context, message):
"""
Event handler for a room receiving a message. First validates a
joining user's name and sends them the list of users.
"""
room = get_object_or_404(ChatRoom, id=message["room"])
if message["action"] == "start":
name = strip_tags(message["name"])
user, created = room.users.get_or_create(name=name)
if not created:
socket.send({"action": "in-use"})
else:
context["user"] = user
users = [u.name for u in room.users.exclude(id=user.id)]
socket.send({"action": "started", "users": users})
user.session = socket.session.session_id
user.save()
joined = {"action": "join", "name": user.name, "id": user.id}
socket.send_and_broadcast_channel(joined)
else:
try:
user = context["user"]
except KeyError:
return
if message["action"] == "message":
message["message"] = strip_tags(message["message"])
message["name"] = user.name
socket.send_and_broadcast_channel(message) | python | def message(request, socket, context, message):
"""
Event handler for a room receiving a message. First validates a
joining user's name and sends them the list of users.
"""
room = get_object_or_404(ChatRoom, id=message["room"])
if message["action"] == "start":
name = strip_tags(message["name"])
user, created = room.users.get_or_create(name=name)
if not created:
socket.send({"action": "in-use"})
else:
context["user"] = user
users = [u.name for u in room.users.exclude(id=user.id)]
socket.send({"action": "started", "users": users})
user.session = socket.session.session_id
user.save()
joined = {"action": "join", "name": user.name, "id": user.id}
socket.send_and_broadcast_channel(joined)
else:
try:
user = context["user"]
except KeyError:
return
if message["action"] == "message":
message["message"] = strip_tags(message["message"])
message["name"] = user.name
socket.send_and_broadcast_channel(message) | [
"def",
"message",
"(",
"request",
",",
"socket",
",",
"context",
",",
"message",
")",
":",
"room",
"=",
"get_object_or_404",
"(",
"ChatRoom",
",",
"id",
"=",
"message",
"[",
"\"room\"",
"]",
")",
"if",
"message",
"[",
"\"action\"",
"]",
"==",
"\"start\""... | Event handler for a room receiving a message. First validates a
joining user's name and sends them the list of users. | [
"Event",
"handler",
"for",
"a",
"room",
"receiving",
"a",
"message",
".",
"First",
"validates",
"a",
"joining",
"user",
"s",
"name",
"and",
"sends",
"them",
"the",
"list",
"of",
"users",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/events.py#L10-L37 | train | 199,645 |
stephenmcd/django-socketio | django_socketio/example_project/chat/events.py | finish | def finish(request, socket, context):
"""
Event handler for a socket session ending in a room. Broadcast
the user leaving and delete them from the DB.
"""
try:
user = context["user"]
except KeyError:
return
left = {"action": "leave", "name": user.name, "id": user.id}
socket.broadcast_channel(left)
user.delete() | python | def finish(request, socket, context):
"""
Event handler for a socket session ending in a room. Broadcast
the user leaving and delete them from the DB.
"""
try:
user = context["user"]
except KeyError:
return
left = {"action": "leave", "name": user.name, "id": user.id}
socket.broadcast_channel(left)
user.delete() | [
"def",
"finish",
"(",
"request",
",",
"socket",
",",
"context",
")",
":",
"try",
":",
"user",
"=",
"context",
"[",
"\"user\"",
"]",
"except",
"KeyError",
":",
"return",
"left",
"=",
"{",
"\"action\"",
":",
"\"leave\"",
",",
"\"name\"",
":",
"user",
"."... | Event handler for a socket session ending in a room. Broadcast
the user leaving and delete them from the DB. | [
"Event",
"handler",
"for",
"a",
"socket",
"session",
"ending",
"in",
"a",
"room",
".",
"Broadcast",
"the",
"user",
"leaving",
"and",
"delete",
"them",
"from",
"the",
"DB",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/events.py#L41-L52 | train | 199,646 |
stephenmcd/django-socketio | django_socketio/utils.py | send | def send(session_id, message):
"""
Send a message to the socket for the given session ID.
"""
try:
socket = CLIENTS[session_id][1]
except KeyError:
raise NoSocket("There is no socket with the session ID: " + session_id)
socket.send(message) | python | def send(session_id, message):
"""
Send a message to the socket for the given session ID.
"""
try:
socket = CLIENTS[session_id][1]
except KeyError:
raise NoSocket("There is no socket with the session ID: " + session_id)
socket.send(message) | [
"def",
"send",
"(",
"session_id",
",",
"message",
")",
":",
"try",
":",
"socket",
"=",
"CLIENTS",
"[",
"session_id",
"]",
"[",
"1",
"]",
"except",
"KeyError",
":",
"raise",
"NoSocket",
"(",
"\"There is no socket with the session ID: \"",
"+",
"session_id",
")"... | Send a message to the socket for the given session ID. | [
"Send",
"a",
"message",
"to",
"the",
"socket",
"for",
"the",
"given",
"session",
"ID",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L14-L22 | train | 199,647 |
stephenmcd/django-socketio | django_socketio/utils.py | broadcast | def broadcast(message):
"""
Find the first socket and use it to broadcast to all sockets
including the socket itself.
"""
try:
socket = CLIENTS.values()[0][1]
except IndexError:
raise NoSocket("There are no clients.")
socket.send_and_broadcast(message) | python | def broadcast(message):
"""
Find the first socket and use it to broadcast to all sockets
including the socket itself.
"""
try:
socket = CLIENTS.values()[0][1]
except IndexError:
raise NoSocket("There are no clients.")
socket.send_and_broadcast(message) | [
"def",
"broadcast",
"(",
"message",
")",
":",
"try",
":",
"socket",
"=",
"CLIENTS",
".",
"values",
"(",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"except",
"IndexError",
":",
"raise",
"NoSocket",
"(",
"\"There are no clients.\"",
")",
"socket",
".",
"send_and_b... | Find the first socket and use it to broadcast to all sockets
including the socket itself. | [
"Find",
"the",
"first",
"socket",
"and",
"use",
"it",
"to",
"broadcast",
"to",
"all",
"sockets",
"including",
"the",
"socket",
"itself",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L25-L34 | train | 199,648 |
stephenmcd/django-socketio | django_socketio/utils.py | broadcast_channel | def broadcast_channel(message, channel):
"""
Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself.
"""
try:
socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
except (IndexError, KeyError):
raise NoSocket("There are no clients on the channel: " + channel)
socket.send_and_broadcast_channel(message, channel) | python | def broadcast_channel(message, channel):
"""
Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself.
"""
try:
socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
except (IndexError, KeyError):
raise NoSocket("There are no clients on the channel: " + channel)
socket.send_and_broadcast_channel(message, channel) | [
"def",
"broadcast_channel",
"(",
"message",
",",
"channel",
")",
":",
"try",
":",
"socket",
"=",
"CLIENTS",
"[",
"CHANNELS",
".",
"get",
"(",
"channel",
",",
"[",
"]",
")",
"[",
"0",
"]",
"]",
"[",
"1",
"]",
"except",
"(",
"IndexError",
",",
"KeyEr... | Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself. | [
"Find",
"the",
"first",
"socket",
"for",
"the",
"given",
"channel",
"and",
"use",
"it",
"to",
"broadcast",
"to",
"the",
"channel",
"including",
"the",
"socket",
"itself",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L37-L46 | train | 199,649 |
stephenmcd/django-socketio | django_socketio/utils.py | format_log | def format_log(request, message_type, message):
"""
Formats a log message similar to gevent's pywsgi request logging.
"""
from django_socketio.settings import MESSAGE_LOG_FORMAT
if MESSAGE_LOG_FORMAT is None:
return None
now = datetime.now().replace(microsecond=0)
args = dict(request.META, TYPE=message_type, MESSAGE=message, TIME=now)
return (MESSAGE_LOG_FORMAT % args) + "\n" | python | def format_log(request, message_type, message):
"""
Formats a log message similar to gevent's pywsgi request logging.
"""
from django_socketio.settings import MESSAGE_LOG_FORMAT
if MESSAGE_LOG_FORMAT is None:
return None
now = datetime.now().replace(microsecond=0)
args = dict(request.META, TYPE=message_type, MESSAGE=message, TIME=now)
return (MESSAGE_LOG_FORMAT % args) + "\n" | [
"def",
"format_log",
"(",
"request",
",",
"message_type",
",",
"message",
")",
":",
"from",
"django_socketio",
".",
"settings",
"import",
"MESSAGE_LOG_FORMAT",
"if",
"MESSAGE_LOG_FORMAT",
"is",
"None",
":",
"return",
"None",
"now",
"=",
"datetime",
".",
"now",
... | Formats a log message similar to gevent's pywsgi request logging. | [
"Formats",
"a",
"log",
"message",
"similar",
"to",
"gevent",
"s",
"pywsgi",
"request",
"logging",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L49-L58 | train | 199,650 |
stephenmcd/django-socketio | django_socketio/management/commands/runserver_socketio.py | Command.get_handler | def get_handler(self, *args, **options):
"""
Returns the django.contrib.staticfiles handler.
"""
handler = WSGIHandler()
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
except ImportError:
return handler
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if (settings.DEBUG and use_static_handler or
(use_static_handler and insecure_serving)):
handler = StaticFilesHandler(handler)
return handler | python | def get_handler(self, *args, **options):
"""
Returns the django.contrib.staticfiles handler.
"""
handler = WSGIHandler()
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
except ImportError:
return handler
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if (settings.DEBUG and use_static_handler or
(use_static_handler and insecure_serving)):
handler = StaticFilesHandler(handler)
return handler | [
"def",
"get_handler",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"handler",
"=",
"WSGIHandler",
"(",
")",
"try",
":",
"from",
"django",
".",
"contrib",
".",
"staticfiles",
".",
"handlers",
"import",
"StaticFilesHandler",
"except",
... | Returns the django.contrib.staticfiles handler. | [
"Returns",
"the",
"django",
".",
"contrib",
".",
"staticfiles",
"handler",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/management/commands/runserver_socketio.py#L72-L86 | train | 199,651 |
stephenmcd/django-socketio | django_socketio/events.py | Event.send | def send(self, request, socket, context, *args):
"""
When an event is sent, run all relevant handlers. Relevant
handlers are those without a channel pattern when the given
socket is not subscribed to any particular channel, or the
handlers with a channel pattern that matches any of the
channels that the given socket is subscribed to.
In the case of subscribe/unsubscribe, match the channel arg
being sent to the channel pattern.
"""
for handler, pattern in self.handlers:
no_channel = not pattern and not socket.channels
if self.name.endswith("subscribe") and pattern:
matches = [pattern.match(args[0])]
else:
matches = [pattern.match(c) for c in socket.channels if pattern]
if no_channel or filter(None, matches):
handler(request, socket, context, *args) | python | def send(self, request, socket, context, *args):
"""
When an event is sent, run all relevant handlers. Relevant
handlers are those without a channel pattern when the given
socket is not subscribed to any particular channel, or the
handlers with a channel pattern that matches any of the
channels that the given socket is subscribed to.
In the case of subscribe/unsubscribe, match the channel arg
being sent to the channel pattern.
"""
for handler, pattern in self.handlers:
no_channel = not pattern and not socket.channels
if self.name.endswith("subscribe") and pattern:
matches = [pattern.match(args[0])]
else:
matches = [pattern.match(c) for c in socket.channels if pattern]
if no_channel or filter(None, matches):
handler(request, socket, context, *args) | [
"def",
"send",
"(",
"self",
",",
"request",
",",
"socket",
",",
"context",
",",
"*",
"args",
")",
":",
"for",
"handler",
",",
"pattern",
"in",
"self",
".",
"handlers",
":",
"no_channel",
"=",
"not",
"pattern",
"and",
"not",
"socket",
".",
"channels",
... | When an event is sent, run all relevant handlers. Relevant
handlers are those without a channel pattern when the given
socket is not subscribed to any particular channel, or the
handlers with a channel pattern that matches any of the
channels that the given socket is subscribed to.
In the case of subscribe/unsubscribe, match the channel arg
being sent to the channel pattern. | [
"When",
"an",
"event",
"is",
"sent",
"run",
"all",
"relevant",
"handlers",
".",
"Relevant",
"handlers",
"are",
"those",
"without",
"a",
"channel",
"pattern",
"when",
"the",
"given",
"socket",
"is",
"not",
"subscribed",
"to",
"any",
"particular",
"channel",
"... | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/events.py#L53-L71 | train | 199,652 |
stephenmcd/django-socketio | django_socketio/example_project/chat/views.py | rooms | def rooms(request, template="rooms.html"):
"""
Homepage - lists all rooms.
"""
context = {"rooms": ChatRoom.objects.all()}
return render(request, template, context) | python | def rooms(request, template="rooms.html"):
"""
Homepage - lists all rooms.
"""
context = {"rooms": ChatRoom.objects.all()}
return render(request, template, context) | [
"def",
"rooms",
"(",
"request",
",",
"template",
"=",
"\"rooms.html\"",
")",
":",
"context",
"=",
"{",
"\"rooms\"",
":",
"ChatRoom",
".",
"objects",
".",
"all",
"(",
")",
"}",
"return",
"render",
"(",
"request",
",",
"template",
",",
"context",
")"
] | Homepage - lists all rooms. | [
"Homepage",
"-",
"lists",
"all",
"rooms",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/views.py#L9-L14 | train | 199,653 |
stephenmcd/django-socketio | django_socketio/example_project/chat/views.py | room | def room(request, slug, template="room.html"):
"""
Show a room.
"""
context = {"room": get_object_or_404(ChatRoom, slug=slug)}
return render(request, template, context) | python | def room(request, slug, template="room.html"):
"""
Show a room.
"""
context = {"room": get_object_or_404(ChatRoom, slug=slug)}
return render(request, template, context) | [
"def",
"room",
"(",
"request",
",",
"slug",
",",
"template",
"=",
"\"room.html\"",
")",
":",
"context",
"=",
"{",
"\"room\"",
":",
"get_object_or_404",
"(",
"ChatRoom",
",",
"slug",
"=",
"slug",
")",
"}",
"return",
"render",
"(",
"request",
",",
"templat... | Show a room. | [
"Show",
"a",
"room",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/views.py#L17-L22 | train | 199,654 |
stephenmcd/django-socketio | django_socketio/example_project/chat/views.py | create | def create(request):
"""
Handles post from the "Add room" form on the homepage, and
redirects to the new room.
"""
name = request.POST.get("name")
if name:
room, created = ChatRoom.objects.get_or_create(name=name)
return redirect(room)
return redirect(rooms) | python | def create(request):
"""
Handles post from the "Add room" form on the homepage, and
redirects to the new room.
"""
name = request.POST.get("name")
if name:
room, created = ChatRoom.objects.get_or_create(name=name)
return redirect(room)
return redirect(rooms) | [
"def",
"create",
"(",
"request",
")",
":",
"name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"name\"",
")",
"if",
"name",
":",
"room",
",",
"created",
"=",
"ChatRoom",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"name",
")",
"return... | Handles post from the "Add room" form on the homepage, and
redirects to the new room. | [
"Handles",
"post",
"from",
"the",
"Add",
"room",
"form",
"on",
"the",
"homepage",
"and",
"redirects",
"to",
"the",
"new",
"room",
"."
] | b704f912551829a3bcf15872ba0e1baf81dea106 | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/views.py#L25-L34 | train | 199,655 |
hannorein/rebound | rebound/data.py | add_outer_solar_system | def add_outer_solar_system(sim):
"""
Add the planet of the outer Solar System as a test problem.
Data taken from NASA Horizons.
"""
Gfac = 1./0.01720209895 # Gaussian constant
if sim.G is not None:
Gfac *= math.sqrt(sim.G)
sim.add( m=1.00000597682, x=-4.06428567034226e-3, y=-6.08813756435987e-3, z=-1.66162304225834e-6, vx=+6.69048890636161e-6*Gfac, vy=-6.33922479583593e-6*Gfac, vz=-3.13202145590767e-9*Gfac ) # Sun
sim.add( m=1./1047.355, x=+3.40546614227466e+0, y=+3.62978190075864e+0, z=+3.42386261766577e-2, vx=-5.59797969310664e-3*Gfac, vy=+5.51815399480116e-3*Gfac, vz=-2.66711392865591e-6*Gfac ) # Jupiter
sim.add( m=1./3501.6, x=+6.60801554403466e+0, y=+6.38084674585064e+0, z=-1.36145963724542e-1, vx=-4.17354020307064e-3*Gfac, vy=+3.99723751748116e-3*Gfac, vz=+1.67206320571441e-5*Gfac ) # Saturn
sim.add( m=1./22869., x=+1.11636331405597e+1, y=+1.60373479057256e+1, z=+3.61783279369958e-1, vx=-3.25884806151064e-3*Gfac, vy=+2.06438412905916e-3*Gfac, vz=-2.17699042180559e-5*Gfac ) # Uranus
sim.add( m=1./19314., x=-3.01777243405203e+1, y=+1.91155314998064e+0, z=-1.53887595621042e-1, vx=-2.17471785045538e-4*Gfac, vy=-3.11361111025884e-3*Gfac, vz=+3.58344705491441e-5*Gfac ) # Neptune
sim.add( m=7.4074074e-09, x=-2.13858977531573e+1, y=+3.20719104739886e+1, z=+2.49245689556096e+0, vx=-1.76936577252484e-3*Gfac, vy=-2.06720938381724e-3*Gfac, vz=+6.58091931493844e-4*Gfac ) | python | def add_outer_solar_system(sim):
"""
Add the planet of the outer Solar System as a test problem.
Data taken from NASA Horizons.
"""
Gfac = 1./0.01720209895 # Gaussian constant
if sim.G is not None:
Gfac *= math.sqrt(sim.G)
sim.add( m=1.00000597682, x=-4.06428567034226e-3, y=-6.08813756435987e-3, z=-1.66162304225834e-6, vx=+6.69048890636161e-6*Gfac, vy=-6.33922479583593e-6*Gfac, vz=-3.13202145590767e-9*Gfac ) # Sun
sim.add( m=1./1047.355, x=+3.40546614227466e+0, y=+3.62978190075864e+0, z=+3.42386261766577e-2, vx=-5.59797969310664e-3*Gfac, vy=+5.51815399480116e-3*Gfac, vz=-2.66711392865591e-6*Gfac ) # Jupiter
sim.add( m=1./3501.6, x=+6.60801554403466e+0, y=+6.38084674585064e+0, z=-1.36145963724542e-1, vx=-4.17354020307064e-3*Gfac, vy=+3.99723751748116e-3*Gfac, vz=+1.67206320571441e-5*Gfac ) # Saturn
sim.add( m=1./22869., x=+1.11636331405597e+1, y=+1.60373479057256e+1, z=+3.61783279369958e-1, vx=-3.25884806151064e-3*Gfac, vy=+2.06438412905916e-3*Gfac, vz=-2.17699042180559e-5*Gfac ) # Uranus
sim.add( m=1./19314., x=-3.01777243405203e+1, y=+1.91155314998064e+0, z=-1.53887595621042e-1, vx=-2.17471785045538e-4*Gfac, vy=-3.11361111025884e-3*Gfac, vz=+3.58344705491441e-5*Gfac ) # Neptune
sim.add( m=7.4074074e-09, x=-2.13858977531573e+1, y=+3.20719104739886e+1, z=+2.49245689556096e+0, vx=-1.76936577252484e-3*Gfac, vy=-2.06720938381724e-3*Gfac, vz=+6.58091931493844e-4*Gfac ) | [
"def",
"add_outer_solar_system",
"(",
"sim",
")",
":",
"Gfac",
"=",
"1.",
"/",
"0.01720209895",
"# Gaussian constant ",
"if",
"sim",
".",
"G",
"is",
"not",
"None",
":",
"Gfac",
"*=",
"math",
".",
"sqrt",
"(",
"sim",
".",
"G",
")",
"sim",
".",
"add",
... | Add the planet of the outer Solar System as a test problem.
Data taken from NASA Horizons. | [
"Add",
"the",
"planet",
"of",
"the",
"outer",
"Solar",
"System",
"as",
"a",
"test",
"problem",
".",
"Data",
"taken",
"from",
"NASA",
"Horizons",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/data.py#L10-L24 | train | 199,656 |
hannorein/rebound | rebound/plotting.py | get_color | def get_color(color):
"""
Takes a string for a color name defined in matplotlib and returns of a 3-tuple of RGB values.
Will simply return passed value if it's a tuple of length three.
Parameters
----------
color : str
Name of matplotlib color to calculate RGB values for.
"""
if isinstance(color, tuple) and len(color) == 3: # already a tuple of RGB values
return color
try:
import matplotlib.colors as mplcolors
except:
raise ImportError("Error importing matplotlib. If running from within a jupyter notebook, try calling '%matplotlib inline' beforehand.")
try:
hexcolor = mplcolors.cnames[color]
except KeyError:
raise AttributeError("Color not recognized in matplotlib.")
hexcolor = hexcolor.lstrip('#')
lv = len(hexcolor)
return tuple(int(hexcolor[i:i + lv // 3], 16)/255. for i in range(0, lv, lv // 3)) | python | def get_color(color):
"""
Takes a string for a color name defined in matplotlib and returns of a 3-tuple of RGB values.
Will simply return passed value if it's a tuple of length three.
Parameters
----------
color : str
Name of matplotlib color to calculate RGB values for.
"""
if isinstance(color, tuple) and len(color) == 3: # already a tuple of RGB values
return color
try:
import matplotlib.colors as mplcolors
except:
raise ImportError("Error importing matplotlib. If running from within a jupyter notebook, try calling '%matplotlib inline' beforehand.")
try:
hexcolor = mplcolors.cnames[color]
except KeyError:
raise AttributeError("Color not recognized in matplotlib.")
hexcolor = hexcolor.lstrip('#')
lv = len(hexcolor)
return tuple(int(hexcolor[i:i + lv // 3], 16)/255. for i in range(0, lv, lv // 3)) | [
"def",
"get_color",
"(",
"color",
")",
":",
"if",
"isinstance",
"(",
"color",
",",
"tuple",
")",
"and",
"len",
"(",
"color",
")",
"==",
"3",
":",
"# already a tuple of RGB values",
"return",
"color",
"try",
":",
"import",
"matplotlib",
".",
"colors",
"as",... | Takes a string for a color name defined in matplotlib and returns of a 3-tuple of RGB values.
Will simply return passed value if it's a tuple of length three.
Parameters
----------
color : str
Name of matplotlib color to calculate RGB values for. | [
"Takes",
"a",
"string",
"for",
"a",
"color",
"name",
"defined",
"in",
"matplotlib",
"and",
"returns",
"of",
"a",
"3",
"-",
"tuple",
"of",
"RGB",
"values",
".",
"Will",
"simply",
"return",
"passed",
"value",
"if",
"it",
"s",
"a",
"tuple",
"of",
"length"... | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/plotting.py#L96-L122 | train | 199,657 |
hannorein/rebound | rebound/plotting.py | fading_line | def fading_line(x, y, color='black', alpha_initial=1., alpha_final=0., glow=False, **kwargs):
"""
Returns a matplotlib LineCollection connecting the points in the x and y lists, with a single color and alpha varying from alpha_initial to alpha_final along the line.
Can pass any kwargs you can pass to LineCollection, like linewidgth.
Parameters
----------
x : list or array of floats for the positions on the (plot's) x axis
y : list or array of floats for the positions on the (plot's) y axis
color : matplotlib color for the line. Can also pass a 3-tuple of RGB values (default: 'black')
alpha_initial: Limiting value of alpha to use at the beginning of the arrays.
alpha_final: Limiting value of alpha to use at the end of the arrays.
"""
try:
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
except:
raise ImportError("Error importing matplotlib and/or numpy. Plotting functions not available. If running from within a jupyter notebook, try calling '%matplotlib inline' beforehand.")
if glow:
glow = False
kwargs["lw"] = 1
fl1 = fading_line(x, y, color, alpha_initial, alpha_final, glow=False, **kwargs)
kwargs["lw"] = 2
alpha_initial *= 0.5
alpha_final *= 0.5
fl2 = fading_line(x, y, color, alpha_initial, alpha_final, glow=False, **kwargs)
kwargs["lw"] = 6
alpha_initial *= 0.5
alpha_final *= 0.5
fl3 = fading_line(x, y, color, alpha_initial, alpha_final, glow=False, **kwargs)
return [fl3,fl2,fl1]
color = get_color(color)
cdict = {'red': ((0.,color[0],color[0]),(1.,color[0],color[0])),
'green': ((0.,color[1],color[1]),(1.,color[1],color[1])),
'blue': ((0.,color[2],color[2]),(1.,color[2],color[2])),
'alpha': ((0.,alpha_initial, alpha_initial), (1., alpha_final, alpha_final))}
Npts = len(x)
if len(y) != Npts:
raise AttributeError("x and y must have same dimension.")
segments = np.zeros((Npts-1,2,2))
segments[0][0] = [x[0], y[0]]
for i in range(1,Npts-1):
pt = [x[i], y[i]]
segments[i-1][1] = pt
segments[i][0] = pt
segments[-1][1] = [x[-1], y[-1]]
individual_cm = LinearSegmentedColormap('indv1', cdict)
lc = LineCollection(segments, cmap=individual_cm, **kwargs)
lc.set_array(np.linspace(0.,1.,len(segments)))
return lc | python | def fading_line(x, y, color='black', alpha_initial=1., alpha_final=0., glow=False, **kwargs):
"""
Returns a matplotlib LineCollection connecting the points in the x and y lists, with a single color and alpha varying from alpha_initial to alpha_final along the line.
Can pass any kwargs you can pass to LineCollection, like linewidgth.
Parameters
----------
x : list or array of floats for the positions on the (plot's) x axis
y : list or array of floats for the positions on the (plot's) y axis
color : matplotlib color for the line. Can also pass a 3-tuple of RGB values (default: 'black')
alpha_initial: Limiting value of alpha to use at the beginning of the arrays.
alpha_final: Limiting value of alpha to use at the end of the arrays.
"""
try:
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
except:
raise ImportError("Error importing matplotlib and/or numpy. Plotting functions not available. If running from within a jupyter notebook, try calling '%matplotlib inline' beforehand.")
if glow:
glow = False
kwargs["lw"] = 1
fl1 = fading_line(x, y, color, alpha_initial, alpha_final, glow=False, **kwargs)
kwargs["lw"] = 2
alpha_initial *= 0.5
alpha_final *= 0.5
fl2 = fading_line(x, y, color, alpha_initial, alpha_final, glow=False, **kwargs)
kwargs["lw"] = 6
alpha_initial *= 0.5
alpha_final *= 0.5
fl3 = fading_line(x, y, color, alpha_initial, alpha_final, glow=False, **kwargs)
return [fl3,fl2,fl1]
color = get_color(color)
cdict = {'red': ((0.,color[0],color[0]),(1.,color[0],color[0])),
'green': ((0.,color[1],color[1]),(1.,color[1],color[1])),
'blue': ((0.,color[2],color[2]),(1.,color[2],color[2])),
'alpha': ((0.,alpha_initial, alpha_initial), (1., alpha_final, alpha_final))}
Npts = len(x)
if len(y) != Npts:
raise AttributeError("x and y must have same dimension.")
segments = np.zeros((Npts-1,2,2))
segments[0][0] = [x[0], y[0]]
for i in range(1,Npts-1):
pt = [x[i], y[i]]
segments[i-1][1] = pt
segments[i][0] = pt
segments[-1][1] = [x[-1], y[-1]]
individual_cm = LinearSegmentedColormap('indv1', cdict)
lc = LineCollection(segments, cmap=individual_cm, **kwargs)
lc.set_array(np.linspace(0.,1.,len(segments)))
return lc | [
"def",
"fading_line",
"(",
"x",
",",
"y",
",",
"color",
"=",
"'black'",
",",
"alpha_initial",
"=",
"1.",
",",
"alpha_final",
"=",
"0.",
",",
"glow",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"from",
"matplotlib",
".",
"collections"... | Returns a matplotlib LineCollection connecting the points in the x and y lists, with a single color and alpha varying from alpha_initial to alpha_final along the line.
Can pass any kwargs you can pass to LineCollection, like linewidgth.
Parameters
----------
x : list or array of floats for the positions on the (plot's) x axis
y : list or array of floats for the positions on the (plot's) y axis
color : matplotlib color for the line. Can also pass a 3-tuple of RGB values (default: 'black')
alpha_initial: Limiting value of alpha to use at the beginning of the arrays.
alpha_final: Limiting value of alpha to use at the end of the arrays. | [
"Returns",
"a",
"matplotlib",
"LineCollection",
"connecting",
"the",
"points",
"in",
"the",
"x",
"and",
"y",
"lists",
"with",
"a",
"single",
"color",
"and",
"alpha",
"varying",
"from",
"alpha_initial",
"to",
"alpha_final",
"along",
"the",
"line",
".",
"Can",
... | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/plotting.py#L124-L180 | train | 199,658 |
hannorein/rebound | rebound/simulationarchive.py | SimulationArchive._getSnapshotIndex | def _getSnapshotIndex(self, t):
"""
Return the index for the snapshot just before t
"""
if t>self.tmax or t<self.tmin:
raise ValueError("Requested time outside of baseline stored in binary file.")
# Bisection method
l = 0
r = len(self)
while True:
bi = l+(r-l)//2
if self.t[bi]>t:
r = bi
else:
l = bi
if r-1<=l:
bi = l
break
return bi, self.t[bi] | python | def _getSnapshotIndex(self, t):
"""
Return the index for the snapshot just before t
"""
if t>self.tmax or t<self.tmin:
raise ValueError("Requested time outside of baseline stored in binary file.")
# Bisection method
l = 0
r = len(self)
while True:
bi = l+(r-l)//2
if self.t[bi]>t:
r = bi
else:
l = bi
if r-1<=l:
bi = l
break
return bi, self.t[bi] | [
"def",
"_getSnapshotIndex",
"(",
"self",
",",
"t",
")",
":",
"if",
"t",
">",
"self",
".",
"tmax",
"or",
"t",
"<",
"self",
".",
"tmin",
":",
"raise",
"ValueError",
"(",
"\"Requested time outside of baseline stored in binary file.\"",
")",
"# Bisection method",
"l... | Return the index for the snapshot just before t | [
"Return",
"the",
"index",
"for",
"the",
"snapshot",
"just",
"before",
"t"
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulationarchive.py#L154-L172 | train | 199,659 |
hannorein/rebound | rebound/simulationarchive.py | SimulationArchive.getSimulations | def getSimulations(self, times, **kwargs):
"""
A generator to quickly access many simulations.
The arguments are the same as for `getSimulation`.
"""
for t in times:
yield self.getSimulation(t, **kwargs) | python | def getSimulations(self, times, **kwargs):
"""
A generator to quickly access many simulations.
The arguments are the same as for `getSimulation`.
"""
for t in times:
yield self.getSimulation(t, **kwargs) | [
"def",
"getSimulations",
"(",
"self",
",",
"times",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"t",
"in",
"times",
":",
"yield",
"self",
".",
"getSimulation",
"(",
"t",
",",
"*",
"*",
"kwargs",
")"
] | A generator to quickly access many simulations.
The arguments are the same as for `getSimulation`. | [
"A",
"generator",
"to",
"quickly",
"access",
"many",
"simulations",
".",
"The",
"arguments",
"are",
"the",
"same",
"as",
"for",
"getSimulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulationarchive.py#L246-L252 | train | 199,660 |
hannorein/rebound | rebound/particle.py | Particle.copy | def copy(self):
"""
Returns a deep copy of the particle. The particle is not added to any simulation by default.
"""
np = Particle()
memmove(byref(np), byref(self), sizeof(self))
return np | python | def copy(self):
"""
Returns a deep copy of the particle. The particle is not added to any simulation by default.
"""
np = Particle()
memmove(byref(np), byref(self), sizeof(self))
return np | [
"def",
"copy",
"(",
"self",
")",
":",
"np",
"=",
"Particle",
"(",
")",
"memmove",
"(",
"byref",
"(",
"np",
")",
",",
"byref",
"(",
"self",
")",
",",
"sizeof",
"(",
"self",
")",
")",
"return",
"np"
] | Returns a deep copy of the particle. The particle is not added to any simulation by default. | [
"Returns",
"a",
"deep",
"copy",
"of",
"the",
"particle",
".",
"The",
"particle",
"is",
"not",
"added",
"to",
"any",
"simulation",
"by",
"default",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/particle.py#L384-L390 | train | 199,661 |
hannorein/rebound | rebound/widget.py | Widget.takeScreenshot | def takeScreenshot(self, times=None, prefix="./screenshot", resetCounter=False, archive=None,mode="snapshot"):
"""
Take one or more screenshots of the widget and save the images to a file.
The images can be used to create a video.
This function cannot be called multiple times within one cell.
Note: this is a new feature and might not work on all systems.
It was tested on python 2.7.10 and 3.5.2 on MacOSX.
Parameters
----------
times : (float, list), optional
If this argument is not given a screenshot of the widget will be made
as it is (without integrating the simulation). If a float is given, then the
simulation will be integrated to that time and then a screenshot will
be taken. If a list of floats is given, the simulation will be integrated
to each time specified in the array. A separate screenshot for
each time will be saved.
prefix : (str), optional
This string will be part of the output filename for each image.
Follow by a five digit integer and the suffix .png. By default the
prefix is './screenshot' which outputs images in the current
directory with the filnames screenshot00000.png, screenshot00001.png...
Note that the prefix can include a directory.
resetCounter : (bool), optional
Resets the output counter to 0.
archive : (rebound.SimulationArchive), optional
Use a REBOUND SimulationArchive. Thus, instead of integratating the
Simulation from the current time, it will use the SimulationArchive
to load a snapshot. See examples for usage.
mode : (string), optional
Mode to use when querying the SimulationArchive. See SimulationArchive
documentation for details. By default the value is "snapshot".
Examples
--------
First, create a simulation and widget. All of the following can go in
one cell.
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> w = sim.getWidget()
>>> w
The widget should show up. To take a screenshot, simply call
>>> w.takeScreenshot()
A new file with the name screenshot00000.png will appear in the
current directory.
Note that the takeScreenshot command needs to be in a separate cell,
i.e. after you see the widget.
You can pass an array of times to the function. This allows you to
take multiple screenshots, for example to create a movie,
>>> times = [0,10,100]
>>> w.takeScreenshot(times)
"""
self.archive = archive
if resetCounter:
self.screenshotcountall = 0
self.screenshotprefix = prefix
self.screenshotcount = 0
self.overlay = "REBOUND"
self.screenshot = ""
if archive is None:
if times is None:
times = self.simp.contents.t
try:
# List
len(times)
except:
# Float:
times = [times]
self.times = times
self.observe(savescreenshot,names="screenshot")
self.simp.contents.integrate(times[0])
self.screenshotcount += 1 # triggers first screenshot
else:
if times is None:
raise ValueError("Need times argument for archive mode.")
try:
len(times)
except:
raise ValueError("Need a list of times for archive mode.")
self.times = times
self.mode = mode
self.observe(savescreenshot,names="screenshot")
sim = archive.getSimulation(times[0],mode=mode)
self.refresh(pointer(sim))
self.screenshotcount += 1 | python | def takeScreenshot(self, times=None, prefix="./screenshot", resetCounter=False, archive=None,mode="snapshot"):
"""
Take one or more screenshots of the widget and save the images to a file.
The images can be used to create a video.
This function cannot be called multiple times within one cell.
Note: this is a new feature and might not work on all systems.
It was tested on python 2.7.10 and 3.5.2 on MacOSX.
Parameters
----------
times : (float, list), optional
If this argument is not given a screenshot of the widget will be made
as it is (without integrating the simulation). If a float is given, then the
simulation will be integrated to that time and then a screenshot will
be taken. If a list of floats is given, the simulation will be integrated
to each time specified in the array. A separate screenshot for
each time will be saved.
prefix : (str), optional
This string will be part of the output filename for each image.
Follow by a five digit integer and the suffix .png. By default the
prefix is './screenshot' which outputs images in the current
directory with the filnames screenshot00000.png, screenshot00001.png...
Note that the prefix can include a directory.
resetCounter : (bool), optional
Resets the output counter to 0.
archive : (rebound.SimulationArchive), optional
Use a REBOUND SimulationArchive. Thus, instead of integratating the
Simulation from the current time, it will use the SimulationArchive
to load a snapshot. See examples for usage.
mode : (string), optional
Mode to use when querying the SimulationArchive. See SimulationArchive
documentation for details. By default the value is "snapshot".
Examples
--------
First, create a simulation and widget. All of the following can go in
one cell.
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> w = sim.getWidget()
>>> w
The widget should show up. To take a screenshot, simply call
>>> w.takeScreenshot()
A new file with the name screenshot00000.png will appear in the
current directory.
Note that the takeScreenshot command needs to be in a separate cell,
i.e. after you see the widget.
You can pass an array of times to the function. This allows you to
take multiple screenshots, for example to create a movie,
>>> times = [0,10,100]
>>> w.takeScreenshot(times)
"""
self.archive = archive
if resetCounter:
self.screenshotcountall = 0
self.screenshotprefix = prefix
self.screenshotcount = 0
self.overlay = "REBOUND"
self.screenshot = ""
if archive is None:
if times is None:
times = self.simp.contents.t
try:
# List
len(times)
except:
# Float:
times = [times]
self.times = times
self.observe(savescreenshot,names="screenshot")
self.simp.contents.integrate(times[0])
self.screenshotcount += 1 # triggers first screenshot
else:
if times is None:
raise ValueError("Need times argument for archive mode.")
try:
len(times)
except:
raise ValueError("Need a list of times for archive mode.")
self.times = times
self.mode = mode
self.observe(savescreenshot,names="screenshot")
sim = archive.getSimulation(times[0],mode=mode)
self.refresh(pointer(sim))
self.screenshotcount += 1 | [
"def",
"takeScreenshot",
"(",
"self",
",",
"times",
"=",
"None",
",",
"prefix",
"=",
"\"./screenshot\"",
",",
"resetCounter",
"=",
"False",
",",
"archive",
"=",
"None",
",",
"mode",
"=",
"\"snapshot\"",
")",
":",
"self",
".",
"archive",
"=",
"archive",
"... | Take one or more screenshots of the widget and save the images to a file.
The images can be used to create a video.
This function cannot be called multiple times within one cell.
Note: this is a new feature and might not work on all systems.
It was tested on python 2.7.10 and 3.5.2 on MacOSX.
Parameters
----------
times : (float, list), optional
If this argument is not given a screenshot of the widget will be made
as it is (without integrating the simulation). If a float is given, then the
simulation will be integrated to that time and then a screenshot will
be taken. If a list of floats is given, the simulation will be integrated
to each time specified in the array. A separate screenshot for
each time will be saved.
prefix : (str), optional
This string will be part of the output filename for each image.
Follow by a five digit integer and the suffix .png. By default the
prefix is './screenshot' which outputs images in the current
directory with the filnames screenshot00000.png, screenshot00001.png...
Note that the prefix can include a directory.
resetCounter : (bool), optional
Resets the output counter to 0.
archive : (rebound.SimulationArchive), optional
Use a REBOUND SimulationArchive. Thus, instead of integratating the
Simulation from the current time, it will use the SimulationArchive
to load a snapshot. See examples for usage.
mode : (string), optional
Mode to use when querying the SimulationArchive. See SimulationArchive
documentation for details. By default the value is "snapshot".
Examples
--------
First, create a simulation and widget. All of the following can go in
one cell.
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> w = sim.getWidget()
>>> w
The widget should show up. To take a screenshot, simply call
>>> w.takeScreenshot()
A new file with the name screenshot00000.png will appear in the
current directory.
Note that the takeScreenshot command needs to be in a separate cell,
i.e. after you see the widget.
You can pass an array of times to the function. This allows you to
take multiple screenshots, for example to create a movie,
>>> times = [0,10,100]
>>> w.takeScreenshot(times) | [
"Take",
"one",
"or",
"more",
"screenshots",
"of",
"the",
"widget",
"and",
"save",
"the",
"images",
"to",
"a",
"file",
".",
"The",
"images",
"can",
"be",
"used",
"to",
"create",
"a",
"video",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/widget.py#L545-L641 | train | 199,662 |
hannorein/rebound | rebound/simulation.py | reb_simulation_integrator_whfast.coordinates | def coordinates(self):
"""
Get or set the internal coordinate system.
Available coordinate systems are:
- ``'jacobi'`` (default)
- ``'democraticheliocentric'``
- ``'whds'``
"""
i = self._coordinates
for name, _i in COORDINATES.items():
if i==_i:
return name
return i | python | def coordinates(self):
"""
Get or set the internal coordinate system.
Available coordinate systems are:
- ``'jacobi'`` (default)
- ``'democraticheliocentric'``
- ``'whds'``
"""
i = self._coordinates
for name, _i in COORDINATES.items():
if i==_i:
return name
return i | [
"def",
"coordinates",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"_coordinates",
"for",
"name",
",",
"_i",
"in",
"COORDINATES",
".",
"items",
"(",
")",
":",
"if",
"i",
"==",
"_i",
":",
"return",
"name",
"return",
"i"
] | Get or set the internal coordinate system.
Available coordinate systems are:
- ``'jacobi'`` (default)
- ``'democraticheliocentric'``
- ``'whds'`` | [
"Get",
"or",
"set",
"the",
"internal",
"coordinate",
"system",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L176-L190 | train | 199,663 |
hannorein/rebound | rebound/simulation.py | Simulation.getWidget | def getWidget(self,**kwargs):
"""
Wrapper function that returns a new widget attached to this simulation.
Widgets provide real-time 3D visualizations from within an Jupyter notebook.
See the Widget class for more details on the possible arguments.
Arguments
---------
All arguments passed to this wrapper function will be passed to /Widget class.
Returns
-------
A rebound.Widget object.
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> sim.getWidget()
"""
from .widget import Widget # ondemand
from ipywidgets import DOMWidget
from IPython.display import display, HTML
if not hasattr(self, '_widgets'):
self._widgets = []
def display_heartbeat(simp):
for w in self._widgets:
w.refresh(simp,isauto=1)
self.visualization = VISUALIZATIONS["webgl"]
clibrebound.reb_display_init_data(byref(self));
self._dhbf = AFF(display_heartbeat)
self._display_heartbeat = self._dhbf
display(HTML(Widget.getClientCode())) # HACK! Javascript should go into custom.js
newWidget = Widget(self,**kwargs)
self._widgets.append(newWidget)
newWidget.refresh(isauto=0)
return newWidget | python | def getWidget(self,**kwargs):
"""
Wrapper function that returns a new widget attached to this simulation.
Widgets provide real-time 3D visualizations from within an Jupyter notebook.
See the Widget class for more details on the possible arguments.
Arguments
---------
All arguments passed to this wrapper function will be passed to /Widget class.
Returns
-------
A rebound.Widget object.
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> sim.getWidget()
"""
from .widget import Widget # ondemand
from ipywidgets import DOMWidget
from IPython.display import display, HTML
if not hasattr(self, '_widgets'):
self._widgets = []
def display_heartbeat(simp):
for w in self._widgets:
w.refresh(simp,isauto=1)
self.visualization = VISUALIZATIONS["webgl"]
clibrebound.reb_display_init_data(byref(self));
self._dhbf = AFF(display_heartbeat)
self._display_heartbeat = self._dhbf
display(HTML(Widget.getClientCode())) # HACK! Javascript should go into custom.js
newWidget = Widget(self,**kwargs)
self._widgets.append(newWidget)
newWidget.refresh(isauto=0)
return newWidget | [
"def",
"getWidget",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"widget",
"import",
"Widget",
"# ondemand",
"from",
"ipywidgets",
"import",
"DOMWidget",
"from",
"IPython",
".",
"display",
"import",
"display",
",",
"HTML",
"if",
"not",
"has... | Wrapper function that returns a new widget attached to this simulation.
Widgets provide real-time 3D visualizations from within an Jupyter notebook.
See the Widget class for more details on the possible arguments.
Arguments
---------
All arguments passed to this wrapper function will be passed to /Widget class.
Returns
-------
A rebound.Widget object.
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> sim.getWidget() | [
"Wrapper",
"function",
"that",
"returns",
"a",
"new",
"widget",
"attached",
"to",
"this",
"simulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L392-L433 | train | 199,664 |
hannorein/rebound | rebound/simulation.py | Simulation.refreshWidgets | def refreshWidgets(self):
"""
This function manually refreshed all widgets attached to this simulation.
You want to call this function if any particle data has been manually changed.
"""
if hasattr(self, '_widgets'):
for w in self._widgets:
w.refresh(isauto=0)
else:
raise RuntimeError("No widgets found") | python | def refreshWidgets(self):
"""
This function manually refreshed all widgets attached to this simulation.
You want to call this function if any particle data has been manually changed.
"""
if hasattr(self, '_widgets'):
for w in self._widgets:
w.refresh(isauto=0)
else:
raise RuntimeError("No widgets found") | [
"def",
"refreshWidgets",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_widgets'",
")",
":",
"for",
"w",
"in",
"self",
".",
"_widgets",
":",
"w",
".",
"refresh",
"(",
"isauto",
"=",
"0",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
... | This function manually refreshed all widgets attached to this simulation.
You want to call this function if any particle data has been manually changed. | [
"This",
"function",
"manually",
"refreshed",
"all",
"widgets",
"attached",
"to",
"this",
"simulation",
".",
"You",
"want",
"to",
"call",
"this",
"function",
"if",
"any",
"particle",
"data",
"has",
"been",
"manually",
"changed",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L435-L445 | train | 199,665 |
hannorein/rebound | rebound/simulation.py | Simulation.status | def status(self):
"""
Prints a summary of the current status
of the simulation.
"""
from rebound import __version__, __build__
s= ""
s += "---------------------------------\n"
s += "REBOUND version: \t%s\n" %__version__
s += "REBOUND built on: \t%s\n" %__build__
s += "Number of particles: \t%d\n" %self.N
s += "Selected integrator: \t" + self.integrator + "\n"
s += "Simulation time: \t%.16e\n" %self.t
s += "Current timestep: \t%f\n" %self.dt
if self.N>0:
s += "---------------------------------\n"
for p in self.particles:
s += str(p) + "\n"
s += "---------------------------------"
print(s) | python | def status(self):
"""
Prints a summary of the current status
of the simulation.
"""
from rebound import __version__, __build__
s= ""
s += "---------------------------------\n"
s += "REBOUND version: \t%s\n" %__version__
s += "REBOUND built on: \t%s\n" %__build__
s += "Number of particles: \t%d\n" %self.N
s += "Selected integrator: \t" + self.integrator + "\n"
s += "Simulation time: \t%.16e\n" %self.t
s += "Current timestep: \t%f\n" %self.dt
if self.N>0:
s += "---------------------------------\n"
for p in self.particles:
s += str(p) + "\n"
s += "---------------------------------"
print(s) | [
"def",
"status",
"(",
"self",
")",
":",
"from",
"rebound",
"import",
"__version__",
",",
"__build__",
"s",
"=",
"\"\"",
"s",
"+=",
"\"---------------------------------\\n\"",
"s",
"+=",
"\"REBOUND version: \\t%s\\n\"",
"%",
"__version__",
"s",
"+=",
"\"REBOUND b... | Prints a summary of the current status
of the simulation. | [
"Prints",
"a",
"summary",
"of",
"the",
"current",
"status",
"of",
"the",
"simulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L632-L651 | train | 199,666 |
hannorein/rebound | rebound/simulation.py | Simulation.integrator | def integrator(self):
"""
Get or set the intergrator module.
Available integrators are:
- ``'ias15'`` (default)
- ``'whfast'``
- ``'sei'``
- ``'leapfrog'``
- ``'janus'``
- ``'mercurius'``
- ``'bs'``
- ``'none'``
Check the online documentation for a full description of each of the integrators.
"""
i = self._integrator
for name, _i in INTEGRATORS.items():
if i==_i:
return name
return i | python | def integrator(self):
"""
Get or set the intergrator module.
Available integrators are:
- ``'ias15'`` (default)
- ``'whfast'``
- ``'sei'``
- ``'leapfrog'``
- ``'janus'``
- ``'mercurius'``
- ``'bs'``
- ``'none'``
Check the online documentation for a full description of each of the integrators.
"""
i = self._integrator
for name, _i in INTEGRATORS.items():
if i==_i:
return name
return i | [
"def",
"integrator",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"_integrator",
"for",
"name",
",",
"_i",
"in",
"INTEGRATORS",
".",
"items",
"(",
")",
":",
"if",
"i",
"==",
"_i",
":",
"return",
"name",
"return",
"i"
] | Get or set the intergrator module.
Available integrators are:
- ``'ias15'`` (default)
- ``'whfast'``
- ``'sei'``
- ``'leapfrog'``
- ``'janus'``
- ``'mercurius'``
- ``'bs'``
- ``'none'``
Check the online documentation for a full description of each of the integrators. | [
"Get",
"or",
"set",
"the",
"intergrator",
"module",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L796-L817 | train | 199,667 |
hannorein/rebound | rebound/simulation.py | Simulation.boundary | def boundary(self):
"""
Get or set the boundary module.
Available boundary modules are:
- ``'none'`` (default)
- ``'open'``
- ``'periodic'``
- ``'shear'``
Check the online documentation for a full description of each of the modules.
"""
i = self._boundary
for name, _i in BOUNDARIES.items():
if i==_i:
return name
return i | python | def boundary(self):
"""
Get or set the boundary module.
Available boundary modules are:
- ``'none'`` (default)
- ``'open'``
- ``'periodic'``
- ``'shear'``
Check the online documentation for a full description of each of the modules.
"""
i = self._boundary
for name, _i in BOUNDARIES.items():
if i==_i:
return name
return i | [
"def",
"boundary",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"_boundary",
"for",
"name",
",",
"_i",
"in",
"BOUNDARIES",
".",
"items",
"(",
")",
":",
"if",
"i",
"==",
"_i",
":",
"return",
"name",
"return",
"i"
] | Get or set the boundary module.
Available boundary modules are:
- ``'none'`` (default)
- ``'open'``
- ``'periodic'``
- ``'shear'``
Check the online documentation for a full description of each of the modules. | [
"Get",
"or",
"set",
"the",
"boundary",
"module",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L842-L859 | train | 199,668 |
hannorein/rebound | rebound/simulation.py | Simulation.gravity | def gravity(self):
"""
Get or set the gravity module.
Available gravity modules are:
- ``'none'``
- ``'basic'`` (default)
- ``'compensated'``
- ``'tree'``
Check the online documentation for a full description of each of the modules.
"""
i = self._gravity
for name, _i in GRAVITIES.items():
if i==_i:
return name
return i | python | def gravity(self):
"""
Get or set the gravity module.
Available gravity modules are:
- ``'none'``
- ``'basic'`` (default)
- ``'compensated'``
- ``'tree'``
Check the online documentation for a full description of each of the modules.
"""
i = self._gravity
for name, _i in GRAVITIES.items():
if i==_i:
return name
return i | [
"def",
"gravity",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"_gravity",
"for",
"name",
",",
"_i",
"in",
"GRAVITIES",
".",
"items",
"(",
")",
":",
"if",
"i",
"==",
"_i",
":",
"return",
"name",
"return",
"i"
] | Get or set the gravity module.
Available gravity modules are:
- ``'none'``
- ``'basic'`` (default)
- ``'compensated'``
- ``'tree'``
Check the online documentation for a full description of each of the modules. | [
"Get",
"or",
"set",
"the",
"gravity",
"module",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L872-L889 | train | 199,669 |
hannorein/rebound | rebound/simulation.py | Simulation.collision | def collision(self):
"""
Get or set the collision module.
Available collision modules are:
- ``'none'`` (default)
- ``'direct'``
- ``'tree'``
- ``'mercurius'``
- ``'direct'``
Check the online documentation for a full description of each of the modules.
"""
i = self._collision
for name, _i in COLLISIONS.items():
if i==_i:
return name
return i | python | def collision(self):
"""
Get or set the collision module.
Available collision modules are:
- ``'none'`` (default)
- ``'direct'``
- ``'tree'``
- ``'mercurius'``
- ``'direct'``
Check the online documentation for a full description of each of the modules.
"""
i = self._collision
for name, _i in COLLISIONS.items():
if i==_i:
return name
return i | [
"def",
"collision",
"(",
"self",
")",
":",
"i",
"=",
"self",
".",
"_collision",
"for",
"name",
",",
"_i",
"in",
"COLLISIONS",
".",
"items",
"(",
")",
":",
"if",
"i",
"==",
"_i",
":",
"return",
"name",
"return",
"i"
] | Get or set the collision module.
Available collision modules are:
- ``'none'`` (default)
- ``'direct'``
- ``'tree'``
- ``'mercurius'``
- ``'direct'``
Check the online documentation for a full description of each of the modules. | [
"Get",
"or",
"set",
"the",
"collision",
"module",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L902-L920 | train | 199,670 |
hannorein/rebound | rebound/simulation.py | Simulation.add_variation | def add_variation(self,order=1,first_order=None, first_order_2=None, testparticle=-1):
"""
This function adds a set of variational particles to the simulation.
If there are N real particles in the simulation, this functions adds N additional variational
particles. To see how many particles (real and variational) are in a simulation, use ``'sim.N'``.
To see how many variational particles are in a simulation use ``'sim.N_var'``.
Currently Leapfrog, WHFast and IAS15 support first order variational equations. IAS15 also
supports second order variational equations.
Parameters
----------
order : integer, optional
By default the function adds a set of first order variational particles to the simulation. Set this flag to 2 for second order.
first_order : Variation, optional
Second order variational equations depend on their corresponding first order variational equations.
This parameter expects the Variation object corresponding to the first order variational equations.
first_order_2 : Variation, optional
Same as first_order. But allows to set two different indicies to calculate off-diagonal elements.
If omitted, then first_order will be used for both first order equations.
testparticle : int, optional
If set to a value >= 0, then only one variational particle will be added and be treated as a test particle.
Returns
-------
Returns Variation object (a copy--you can only modify it through its particles property or vary method).
"""
cur_var_config_N = self.var_config_N
if order==1:
index = clibrebound.reb_add_var_1st_order(byref(self),c_int(testparticle))
elif order==2:
if first_order is None:
raise AttributeError("Please specify corresponding first order variational equations when initializing second order variational equations.")
if first_order_2 is None:
first_order_2 = first_order
index = clibrebound.reb_add_var_2nd_order(byref(self),c_int(testparticle),c_int(first_order.index),c_int(first_order_2.index))
else:
raise AttributeError("Only variational equations of first and second order are supported.")
# Need a copy because location of original might shift if more variations added
s = Variation.from_buffer_copy(self.var_config[cur_var_config_N])
return s | python | def add_variation(self,order=1,first_order=None, first_order_2=None, testparticle=-1):
"""
This function adds a set of variational particles to the simulation.
If there are N real particles in the simulation, this functions adds N additional variational
particles. To see how many particles (real and variational) are in a simulation, use ``'sim.N'``.
To see how many variational particles are in a simulation use ``'sim.N_var'``.
Currently Leapfrog, WHFast and IAS15 support first order variational equations. IAS15 also
supports second order variational equations.
Parameters
----------
order : integer, optional
By default the function adds a set of first order variational particles to the simulation. Set this flag to 2 for second order.
first_order : Variation, optional
Second order variational equations depend on their corresponding first order variational equations.
This parameter expects the Variation object corresponding to the first order variational equations.
first_order_2 : Variation, optional
Same as first_order. But allows to set two different indicies to calculate off-diagonal elements.
If omitted, then first_order will be used for both first order equations.
testparticle : int, optional
If set to a value >= 0, then only one variational particle will be added and be treated as a test particle.
Returns
-------
Returns Variation object (a copy--you can only modify it through its particles property or vary method).
"""
cur_var_config_N = self.var_config_N
if order==1:
index = clibrebound.reb_add_var_1st_order(byref(self),c_int(testparticle))
elif order==2:
if first_order is None:
raise AttributeError("Please specify corresponding first order variational equations when initializing second order variational equations.")
if first_order_2 is None:
first_order_2 = first_order
index = clibrebound.reb_add_var_2nd_order(byref(self),c_int(testparticle),c_int(first_order.index),c_int(first_order_2.index))
else:
raise AttributeError("Only variational equations of first and second order are supported.")
# Need a copy because location of original might shift if more variations added
s = Variation.from_buffer_copy(self.var_config[cur_var_config_N])
return s | [
"def",
"add_variation",
"(",
"self",
",",
"order",
"=",
"1",
",",
"first_order",
"=",
"None",
",",
"first_order_2",
"=",
"None",
",",
"testparticle",
"=",
"-",
"1",
")",
":",
"cur_var_config_N",
"=",
"self",
".",
"var_config_N",
"if",
"order",
"==",
"1",... | This function adds a set of variational particles to the simulation.
If there are N real particles in the simulation, this functions adds N additional variational
particles. To see how many particles (real and variational) are in a simulation, use ``'sim.N'``.
To see how many variational particles are in a simulation use ``'sim.N_var'``.
Currently Leapfrog, WHFast and IAS15 support first order variational equations. IAS15 also
supports second order variational equations.
Parameters
----------
order : integer, optional
By default the function adds a set of first order variational particles to the simulation. Set this flag to 2 for second order.
first_order : Variation, optional
Second order variational equations depend on their corresponding first order variational equations.
This parameter expects the Variation object corresponding to the first order variational equations.
first_order_2 : Variation, optional
Same as first_order. But allows to set two different indicies to calculate off-diagonal elements.
If omitted, then first_order will be used for both first order equations.
testparticle : int, optional
If set to a value >= 0, then only one variational particle will be added and be treated as a test particle.
Returns
-------
Returns Variation object (a copy--you can only modify it through its particles property or vary method). | [
"This",
"function",
"adds",
"a",
"set",
"of",
"variational",
"particles",
"to",
"the",
"simulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1011-L1055 | train | 199,671 |
hannorein/rebound | rebound/simulation.py | Simulation.init_megno | def init_megno(self, seed=None):
"""
This function initialises the chaos indicator MEGNO particles and enables their integration.
MEGNO is short for Mean Exponential Growth of Nearby orbits. It can be used to test
if a system is chaotic or not. In the backend, the integrator is integrating an additional set
of particles using the variational equation. Note that variational equations are better
suited for this than shadow particles. MEGNO is currently only supported in the IAS15
and WHFast integrators.
This function also needs to be called if you are interested in the Lyapunov exponent as it is
calculate with the help of MEGNO. See Rein and Tamayo 2015 for details on the implementation.
For more information on MENGO see e.g. http://dx.doi.org/10.1051/0004-6361:20011189
"""
if seed is None:
clibrebound.reb_tools_megno_init(byref(self))
else:
clibrebound.reb_tools_megno_init_seed(byref(self), c_uint(seed)) | python | def init_megno(self, seed=None):
"""
This function initialises the chaos indicator MEGNO particles and enables their integration.
MEGNO is short for Mean Exponential Growth of Nearby orbits. It can be used to test
if a system is chaotic or not. In the backend, the integrator is integrating an additional set
of particles using the variational equation. Note that variational equations are better
suited for this than shadow particles. MEGNO is currently only supported in the IAS15
and WHFast integrators.
This function also needs to be called if you are interested in the Lyapunov exponent as it is
calculate with the help of MEGNO. See Rein and Tamayo 2015 for details on the implementation.
For more information on MENGO see e.g. http://dx.doi.org/10.1051/0004-6361:20011189
"""
if seed is None:
clibrebound.reb_tools_megno_init(byref(self))
else:
clibrebound.reb_tools_megno_init_seed(byref(self), c_uint(seed)) | [
"def",
"init_megno",
"(",
"self",
",",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
"is",
"None",
":",
"clibrebound",
".",
"reb_tools_megno_init",
"(",
"byref",
"(",
"self",
")",
")",
"else",
":",
"clibrebound",
".",
"reb_tools_megno_init_seed",
"(",
"byre... | This function initialises the chaos indicator MEGNO particles and enables their integration.
MEGNO is short for Mean Exponential Growth of Nearby orbits. It can be used to test
if a system is chaotic or not. In the backend, the integrator is integrating an additional set
of particles using the variational equation. Note that variational equations are better
suited for this than shadow particles. MEGNO is currently only supported in the IAS15
and WHFast integrators.
This function also needs to be called if you are interested in the Lyapunov exponent as it is
calculate with the help of MEGNO. See Rein and Tamayo 2015 for details on the implementation.
For more information on MENGO see e.g. http://dx.doi.org/10.1051/0004-6361:20011189 | [
"This",
"function",
"initialises",
"the",
"chaos",
"indicator",
"MEGNO",
"particles",
"and",
"enables",
"their",
"integration",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1058-L1076 | train | 199,672 |
hannorein/rebound | rebound/simulation.py | Simulation.remove | def remove(self, index=None, hash=None, keepSorted=True):
"""
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specifiy particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
Might set it to zero in cases with many particles and many removals to speed things up.
"""
if index is not None:
clibrebound.reb_remove(byref(self), index, keepSorted)
if hash is not None:
hash_types = c_uint32, c_uint, c_ulong
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
int_types = int,
else:
string_types = basestring,
int_types = int, long
if isinstance(hash, string_types):
clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted)
elif isinstance(hash, int_types):
clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted)
elif isinstance(hash, hash_types):
clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted)
if hasattr(self, '_widgets'):
self._display_heartbeat(pointer(self))
self.process_messages() | python | def remove(self, index=None, hash=None, keepSorted=True):
"""
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specifiy particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
Might set it to zero in cases with many particles and many removals to speed things up.
"""
if index is not None:
clibrebound.reb_remove(byref(self), index, keepSorted)
if hash is not None:
hash_types = c_uint32, c_uint, c_ulong
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
int_types = int,
else:
string_types = basestring,
int_types = int, long
if isinstance(hash, string_types):
clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted)
elif isinstance(hash, int_types):
clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted)
elif isinstance(hash, hash_types):
clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted)
if hasattr(self, '_widgets'):
self._display_heartbeat(pointer(self))
self.process_messages() | [
"def",
"remove",
"(",
"self",
",",
"index",
"=",
"None",
",",
"hash",
"=",
"None",
",",
"keepSorted",
"=",
"True",
")",
":",
"if",
"index",
"is",
"not",
"None",
":",
"clibrebound",
".",
"reb_remove",
"(",
"byref",
"(",
"self",
")",
",",
"index",
",... | Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specifiy particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
Might set it to zero in cases with many particles and many removals to speed things up. | [
"Removes",
"a",
"particle",
"from",
"the",
"simulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1154-L1188 | train | 199,673 |
hannorein/rebound | rebound/simulation.py | Simulation.particles_ascii | def particles_ascii(self, prec=8):
"""
Returns an ASCII string with all particles' masses, radii, positions and velocities.
Parameters
----------
prec : int, optional
Number of digits after decimal point. Default 8.
"""
s = ""
for p in self.particles:
s += (("%%.%de "%prec) * 8)%(p.m, p.r, p.x, p.y, p.z, p.vx, p.vy, p.vz) + "\n"
if len(s):
s = s[:-1]
return s | python | def particles_ascii(self, prec=8):
"""
Returns an ASCII string with all particles' masses, radii, positions and velocities.
Parameters
----------
prec : int, optional
Number of digits after decimal point. Default 8.
"""
s = ""
for p in self.particles:
s += (("%%.%de "%prec) * 8)%(p.m, p.r, p.x, p.y, p.z, p.vx, p.vy, p.vz) + "\n"
if len(s):
s = s[:-1]
return s | [
"def",
"particles_ascii",
"(",
"self",
",",
"prec",
"=",
"8",
")",
":",
"s",
"=",
"\"\"",
"for",
"p",
"in",
"self",
".",
"particles",
":",
"s",
"+=",
"(",
"(",
"\"%%.%de \"",
"%",
"prec",
")",
"*",
"8",
")",
"%",
"(",
"p",
".",
"m",
",",
"p",... | Returns an ASCII string with all particles' masses, radii, positions and velocities.
Parameters
----------
prec : int, optional
Number of digits after decimal point. Default 8. | [
"Returns",
"an",
"ASCII",
"string",
"with",
"all",
"particles",
"masses",
"radii",
"positions",
"and",
"velocities",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1190-L1204 | train | 199,674 |
hannorein/rebound | rebound/simulation.py | Simulation.add_particles_ascii | def add_particles_ascii(self, s):
"""
Adds particles from an ASCII string.
Parameters
----------
s : string
One particle per line. Each line should include particle's mass, radius, position and velocity.
"""
for l in s.split("\n"):
r = l.split()
if len(r):
try:
r = [float(x) for x in r]
p = Particle(simulation=self, m=r[0], r=r[1], x=r[2], y=r[3], z=r[4], vx=r[5], vy=r[6], vz=r[7])
self.add(p)
except:
raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).") | python | def add_particles_ascii(self, s):
"""
Adds particles from an ASCII string.
Parameters
----------
s : string
One particle per line. Each line should include particle's mass, radius, position and velocity.
"""
for l in s.split("\n"):
r = l.split()
if len(r):
try:
r = [float(x) for x in r]
p = Particle(simulation=self, m=r[0], r=r[1], x=r[2], y=r[3], z=r[4], vx=r[5], vy=r[6], vz=r[7])
self.add(p)
except:
raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).") | [
"def",
"add_particles_ascii",
"(",
"self",
",",
"s",
")",
":",
"for",
"l",
"in",
"s",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"r",
"=",
"l",
".",
"split",
"(",
")",
"if",
"len",
"(",
"r",
")",
":",
"try",
":",
"r",
"=",
"[",
"float",
"(",
"... | Adds particles from an ASCII string.
Parameters
----------
s : string
One particle per line. Each line should include particle's mass, radius, position and velocity. | [
"Adds",
"particles",
"from",
"an",
"ASCII",
"string",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1206-L1223 | train | 199,675 |
hannorein/rebound | rebound/simulation.py | Simulation.calculate_orbits | def calculate_orbits(self, primary=None, jacobi_masses=False, heliocentric=None, barycentric=None):
"""
Calculate orbital parameters for all partices in the simulation.
By default this functions returns the orbits in Jacobi coordinates.
If MEGNO is enabled, variational particles will be ignored.
Parameters
----------
primary : rebound.Particle, optional
Set the primary against which to reference the osculating orbit. Default(use Jacobi center of mass)
jacobi_masses: bool
Whether to use jacobi primary mass in orbit calculation. (Default: False)
heliocentric: bool, DEPRECATED
To calculate heliocentric elements, pass primary=sim.particles[0]
barycentric : bool, DEPRECATED
To calculate barycentric elements, pass primary=sim.calculate_com()
Returns
-------
Returns an array of Orbits of length N-1.
"""
orbits = []
if heliocentric is not None or barycentric is not None:
raise AttributeError('heliocentric and barycentric keywords in calculate_orbits are deprecated. Pass primary keyword instead (sim.particles[0] for heliocentric and sim.calculate_com() for barycentric)')
if primary is None:
jacobi = True
primary = self.particles[0]
clibrebound.reb_get_com_of_pair.restype = Particle
else:
jacobi = False
for p in self.particles[1:self.N_real]:
if jacobi_masses is True:
interior_mass = primary.m
# orbit conversion uses mu=G*(p.m+primary.m) so set prim.m=Mjac-m so mu=G*Mjac
primary.m = self.particles[0].m*(p.m + interior_mass)/interior_mass - p.m
orbits.append(p.calculate_orbit(primary=primary))
primary.m = interior_mass # back to total mass of interior bodies to update com
else:
orbits.append(p.calculate_orbit(primary=primary))
if jacobi is True: # update com to include current particle for next iteration
primary = clibrebound.reb_get_com_of_pair(primary, p)
return orbits | python | def calculate_orbits(self, primary=None, jacobi_masses=False, heliocentric=None, barycentric=None):
"""
Calculate orbital parameters for all partices in the simulation.
By default this functions returns the orbits in Jacobi coordinates.
If MEGNO is enabled, variational particles will be ignored.
Parameters
----------
primary : rebound.Particle, optional
Set the primary against which to reference the osculating orbit. Default(use Jacobi center of mass)
jacobi_masses: bool
Whether to use jacobi primary mass in orbit calculation. (Default: False)
heliocentric: bool, DEPRECATED
To calculate heliocentric elements, pass primary=sim.particles[0]
barycentric : bool, DEPRECATED
To calculate barycentric elements, pass primary=sim.calculate_com()
Returns
-------
Returns an array of Orbits of length N-1.
"""
orbits = []
if heliocentric is not None or barycentric is not None:
raise AttributeError('heliocentric and barycentric keywords in calculate_orbits are deprecated. Pass primary keyword instead (sim.particles[0] for heliocentric and sim.calculate_com() for barycentric)')
if primary is None:
jacobi = True
primary = self.particles[0]
clibrebound.reb_get_com_of_pair.restype = Particle
else:
jacobi = False
for p in self.particles[1:self.N_real]:
if jacobi_masses is True:
interior_mass = primary.m
# orbit conversion uses mu=G*(p.m+primary.m) so set prim.m=Mjac-m so mu=G*Mjac
primary.m = self.particles[0].m*(p.m + interior_mass)/interior_mass - p.m
orbits.append(p.calculate_orbit(primary=primary))
primary.m = interior_mass # back to total mass of interior bodies to update com
else:
orbits.append(p.calculate_orbit(primary=primary))
if jacobi is True: # update com to include current particle for next iteration
primary = clibrebound.reb_get_com_of_pair(primary, p)
return orbits | [
"def",
"calculate_orbits",
"(",
"self",
",",
"primary",
"=",
"None",
",",
"jacobi_masses",
"=",
"False",
",",
"heliocentric",
"=",
"None",
",",
"barycentric",
"=",
"None",
")",
":",
"orbits",
"=",
"[",
"]",
"if",
"heliocentric",
"is",
"not",
"None",
"or"... | Calculate orbital parameters for all partices in the simulation.
By default this functions returns the orbits in Jacobi coordinates.
If MEGNO is enabled, variational particles will be ignored.
Parameters
----------
primary : rebound.Particle, optional
Set the primary against which to reference the osculating orbit. Default(use Jacobi center of mass)
jacobi_masses: bool
Whether to use jacobi primary mass in orbit calculation. (Default: False)
heliocentric: bool, DEPRECATED
To calculate heliocentric elements, pass primary=sim.particles[0]
barycentric : bool, DEPRECATED
To calculate barycentric elements, pass primary=sim.calculate_com()
Returns
-------
Returns an array of Orbits of length N-1. | [
"Calculate",
"orbital",
"parameters",
"for",
"all",
"partices",
"in",
"the",
"simulation",
".",
"By",
"default",
"this",
"functions",
"returns",
"the",
"orbits",
"in",
"Jacobi",
"coordinates",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1226-L1273 | train | 199,676 |
hannorein/rebound | rebound/simulation.py | Simulation.calculate_com | def calculate_com(self, first=0, last=None):
"""
Returns the center of momentum for all particles in the simulation.
Parameters
----------
first: int, optional
If ``first`` is specified, only calculate the center of momentum starting
from index=``first``.
last : int or None, optional
If ``last`` is specified only calculate the center of momentum up to
(but excluding) index=``last``. Same behavior as Python's range function.
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.add(m=1, x=-20)
>>> sim.add(m=1, x=-10)
>>> sim.add(m=1, x=0)
>>> sim.add(m=1, x=10)
>>> sim.add(m=1, x=20)
>>> com = sim.calculate_com()
>>> com.x
0.0
>>> com = sim.calculate_com(first=2,last=4) # Considers indices 2,3
>>> com.x
5.0
"""
if last is None:
last = self.N_real
clibrebound.reb_get_com_range.restype = Particle
return clibrebound.reb_get_com_range(byref(self), c_int(first), c_int(last)) | python | def calculate_com(self, first=0, last=None):
"""
Returns the center of momentum for all particles in the simulation.
Parameters
----------
first: int, optional
If ``first`` is specified, only calculate the center of momentum starting
from index=``first``.
last : int or None, optional
If ``last`` is specified only calculate the center of momentum up to
(but excluding) index=``last``. Same behavior as Python's range function.
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.add(m=1, x=-20)
>>> sim.add(m=1, x=-10)
>>> sim.add(m=1, x=0)
>>> sim.add(m=1, x=10)
>>> sim.add(m=1, x=20)
>>> com = sim.calculate_com()
>>> com.x
0.0
>>> com = sim.calculate_com(first=2,last=4) # Considers indices 2,3
>>> com.x
5.0
"""
if last is None:
last = self.N_real
clibrebound.reb_get_com_range.restype = Particle
return clibrebound.reb_get_com_range(byref(self), c_int(first), c_int(last)) | [
"def",
"calculate_com",
"(",
"self",
",",
"first",
"=",
"0",
",",
"last",
"=",
"None",
")",
":",
"if",
"last",
"is",
"None",
":",
"last",
"=",
"self",
".",
"N_real",
"clibrebound",
".",
"reb_get_com_range",
".",
"restype",
"=",
"Particle",
"return",
"c... | Returns the center of momentum for all particles in the simulation.
Parameters
----------
first: int, optional
If ``first`` is specified, only calculate the center of momentum starting
from index=``first``.
last : int or None, optional
If ``last`` is specified only calculate the center of momentum up to
(but excluding) index=``last``. Same behavior as Python's range function.
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.add(m=1, x=-20)
>>> sim.add(m=1, x=-10)
>>> sim.add(m=1, x=0)
>>> sim.add(m=1, x=10)
>>> sim.add(m=1, x=20)
>>> com = sim.calculate_com()
>>> com.x
0.0
>>> com = sim.calculate_com(first=2,last=4) # Considers indices 2,3
>>> com.x
5.0 | [
"Returns",
"the",
"center",
"of",
"momentum",
"for",
"all",
"particles",
"in",
"the",
"simulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1276-L1308 | train | 199,677 |
hannorein/rebound | rebound/simulation.py | Simulation.serialize_particle_data | def serialize_particle_data(self,**kwargs):
"""
Fast way to access serialized particle data via numpy arrays.
This function can directly set the values of numpy arrays to
current particle data. This is significantly faster than accessing
particle data via `sim.particles` as all the copying is done
on the C side.
No memory is allocated by this function.
It expects correctly sized numpy arrays as arguments. The argument
name indicates what kind of particle data is written to the array.
Possible argument names are "hash", "m", "r", "xyz", "vxvyvz", and
"xyzvxvyvz". The datatype for the "hash" array needs to be uint32.
The other arrays expect a datatype of float64. The lengths of
"hash", "m", "r" arrays need to be at least sim.N. The lengths of
xyz and vxvyvz need to be at least 3*sim.N. The length of
"xyzvxvyvz" arrays need to be 6*sim.N. Exceptions are raised
otherwise.
Note that this routine is only intended for special use cases
where speed is an issue. For normal use, it is recommended to
access particle data via the `sim.particles` array. Be aware of
potential issues that arrise by directly accesing the memory of
numpy arrays (see numpy documentation for more details).
Examples
--------
This sets an array to the xyz positions of all particles:
>>> import numpy as np
>>> a = np.zeros((sim.N,3),dtype="float64")
>>> sim.serialize_particle_data(xyz=a)
>>> print(a)
To get all current radii of particles:
>>> a = np.zeros(sim.N,dtype="float64")
>>> sim.serialize_particle_data(r=a)
>>> print(a)
To get all current radii and hashes of particles:
>>> a = np.zeros(sim.N,dtype="float64")
>>> b = np.zeros(sim.N,dtype="uint32")
>>> sim.serialize_particle_data(r=a,hash=b)
>>> print(a,b)
"""
N = self.N
possible_keys = ["hash","m","r","xyz","vxvyvz","xyzvxvyvz"]
d = {x:None for x in possible_keys}
for k,v in kwargs.items():
if k in d:
if k == "hash":
if v.dtype!= "uint32":
raise AttributeError("Expected 'uint32' data type for '%s' array."%k)
if v.size<N:
raise AttributeError("Array '%s' is not large enough."%k)
d[k] = v.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
else:
if v.dtype!= "float64":
raise AttributeError("Expected 'float64' data type for %s array."%k)
if k in ["xyz", "vxvyvz"]:
minsize = 3*N
elif k in ["xyzvxvyvz"]:
minsize = 6*N
else:
minsize = N
if v.size<minsize:
raise AttributeError("Array '%s' is not large enough."%k)
d[k] = v.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
else:
raise AttributeError("Only '%s' are currently supported attributes for serialization." % "', '".join(d.keys()))
clibrebound.reb_serialize_particle_data(byref(self), d["hash"], d["m"], d["r"], d["xyz"], d["vxvyvz"], d["xyzvxvyvz"]) | python | def serialize_particle_data(self,**kwargs):
"""
Fast way to access serialized particle data via numpy arrays.
This function can directly set the values of numpy arrays to
current particle data. This is significantly faster than accessing
particle data via `sim.particles` as all the copying is done
on the C side.
No memory is allocated by this function.
It expects correctly sized numpy arrays as arguments. The argument
name indicates what kind of particle data is written to the array.
Possible argument names are "hash", "m", "r", "xyz", "vxvyvz", and
"xyzvxvyvz". The datatype for the "hash" array needs to be uint32.
The other arrays expect a datatype of float64. The lengths of
"hash", "m", "r" arrays need to be at least sim.N. The lengths of
xyz and vxvyvz need to be at least 3*sim.N. The length of
"xyzvxvyvz" arrays need to be 6*sim.N. Exceptions are raised
otherwise.
Note that this routine is only intended for special use cases
where speed is an issue. For normal use, it is recommended to
access particle data via the `sim.particles` array. Be aware of
potential issues that arrise by directly accesing the memory of
numpy arrays (see numpy documentation for more details).
Examples
--------
This sets an array to the xyz positions of all particles:
>>> import numpy as np
>>> a = np.zeros((sim.N,3),dtype="float64")
>>> sim.serialize_particle_data(xyz=a)
>>> print(a)
To get all current radii of particles:
>>> a = np.zeros(sim.N,dtype="float64")
>>> sim.serialize_particle_data(r=a)
>>> print(a)
To get all current radii and hashes of particles:
>>> a = np.zeros(sim.N,dtype="float64")
>>> b = np.zeros(sim.N,dtype="uint32")
>>> sim.serialize_particle_data(r=a,hash=b)
>>> print(a,b)
"""
N = self.N
possible_keys = ["hash","m","r","xyz","vxvyvz","xyzvxvyvz"]
d = {x:None for x in possible_keys}
for k,v in kwargs.items():
if k in d:
if k == "hash":
if v.dtype!= "uint32":
raise AttributeError("Expected 'uint32' data type for '%s' array."%k)
if v.size<N:
raise AttributeError("Array '%s' is not large enough."%k)
d[k] = v.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
else:
if v.dtype!= "float64":
raise AttributeError("Expected 'float64' data type for %s array."%k)
if k in ["xyz", "vxvyvz"]:
minsize = 3*N
elif k in ["xyzvxvyvz"]:
minsize = 6*N
else:
minsize = N
if v.size<minsize:
raise AttributeError("Array '%s' is not large enough."%k)
d[k] = v.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
else:
raise AttributeError("Only '%s' are currently supported attributes for serialization." % "', '".join(d.keys()))
clibrebound.reb_serialize_particle_data(byref(self), d["hash"], d["m"], d["r"], d["xyz"], d["vxvyvz"], d["xyzvxvyvz"]) | [
"def",
"serialize_particle_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"N",
"=",
"self",
".",
"N",
"possible_keys",
"=",
"[",
"\"hash\"",
",",
"\"m\"",
",",
"\"r\"",
",",
"\"xyz\"",
",",
"\"vxvyvz\"",
",",
"\"xyzvxvyvz\"",
"]",
"d",
"=",
"{"... | Fast way to access serialized particle data via numpy arrays.
This function can directly set the values of numpy arrays to
current particle data. This is significantly faster than accessing
particle data via `sim.particles` as all the copying is done
on the C side.
No memory is allocated by this function.
It expects correctly sized numpy arrays as arguments. The argument
name indicates what kind of particle data is written to the array.
Possible argument names are "hash", "m", "r", "xyz", "vxvyvz", and
"xyzvxvyvz". The datatype for the "hash" array needs to be uint32.
The other arrays expect a datatype of float64. The lengths of
"hash", "m", "r" arrays need to be at least sim.N. The lengths of
xyz and vxvyvz need to be at least 3*sim.N. The length of
"xyzvxvyvz" arrays need to be 6*sim.N. Exceptions are raised
otherwise.
Note that this routine is only intended for special use cases
where speed is an issue. For normal use, it is recommended to
access particle data via the `sim.particles` array. Be aware of
potential issues that arrise by directly accesing the memory of
numpy arrays (see numpy documentation for more details).
Examples
--------
This sets an array to the xyz positions of all particles:
>>> import numpy as np
>>> a = np.zeros((sim.N,3),dtype="float64")
>>> sim.serialize_particle_data(xyz=a)
>>> print(a)
To get all current radii of particles:
>>> a = np.zeros(sim.N,dtype="float64")
>>> sim.serialize_particle_data(r=a)
>>> print(a)
To get all current radii and hashes of particles:
>>> a = np.zeros(sim.N,dtype="float64")
>>> b = np.zeros(sim.N,dtype="uint32")
>>> sim.serialize_particle_data(r=a,hash=b)
>>> print(a,b) | [
"Fast",
"way",
"to",
"access",
"serialized",
"particle",
"data",
"via",
"numpy",
"arrays",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1311-L1386 | train | 199,678 |
hannorein/rebound | rebound/simulation.py | Simulation.calculate_energy | def calculate_energy(self):
"""
Returns the sum of potential and kinetic energy of all particles in the simulation.
"""
clibrebound.reb_tools_energy.restype = c_double
return clibrebound.reb_tools_energy(byref(self)) | python | def calculate_energy(self):
"""
Returns the sum of potential and kinetic energy of all particles in the simulation.
"""
clibrebound.reb_tools_energy.restype = c_double
return clibrebound.reb_tools_energy(byref(self)) | [
"def",
"calculate_energy",
"(",
"self",
")",
":",
"clibrebound",
".",
"reb_tools_energy",
".",
"restype",
"=",
"c_double",
"return",
"clibrebound",
".",
"reb_tools_energy",
"(",
"byref",
"(",
"self",
")",
")"
] | Returns the sum of potential and kinetic energy of all particles in the simulation. | [
"Returns",
"the",
"sum",
"of",
"potential",
"and",
"kinetic",
"energy",
"of",
"all",
"particles",
"in",
"the",
"simulation",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1440-L1445 | train | 199,679 |
hannorein/rebound | rebound/simulation.py | Simulation.configure_box | def configure_box(self, boxsize, root_nx=1, root_ny=1, root_nz=1):
"""
Initialize the simulation box.
This function only needs to be called it boundary conditions other than "none"
are used. In such a case the boxsize must be known and is set with this function.
Parameters
----------
boxsize : float, optional
The size of one root box.
root_nx, root_ny, root_nz : int, optional
The number of root boxes in each direction. The total size of the simulation box
will be ``root_nx * boxsize``, ``root_ny * boxsize`` and ``root_nz * boxsize``.
By default there will be exactly one root box in each direction.
"""
clibrebound.reb_configure_box(byref(self), c_double(boxsize), c_int(root_nx), c_int(root_ny), c_int(root_nz))
return | python | def configure_box(self, boxsize, root_nx=1, root_ny=1, root_nz=1):
"""
Initialize the simulation box.
This function only needs to be called it boundary conditions other than "none"
are used. In such a case the boxsize must be known and is set with this function.
Parameters
----------
boxsize : float, optional
The size of one root box.
root_nx, root_ny, root_nz : int, optional
The number of root boxes in each direction. The total size of the simulation box
will be ``root_nx * boxsize``, ``root_ny * boxsize`` and ``root_nz * boxsize``.
By default there will be exactly one root box in each direction.
"""
clibrebound.reb_configure_box(byref(self), c_double(boxsize), c_int(root_nx), c_int(root_ny), c_int(root_nz))
return | [
"def",
"configure_box",
"(",
"self",
",",
"boxsize",
",",
"root_nx",
"=",
"1",
",",
"root_ny",
"=",
"1",
",",
"root_nz",
"=",
"1",
")",
":",
"clibrebound",
".",
"reb_configure_box",
"(",
"byref",
"(",
"self",
")",
",",
"c_double",
"(",
"boxsize",
")",
... | Initialize the simulation box.
This function only needs to be called it boundary conditions other than "none"
are used. In such a case the boxsize must be known and is set with this function.
Parameters
----------
boxsize : float, optional
The size of one root box.
root_nx, root_ny, root_nz : int, optional
The number of root boxes in each direction. The total size of the simulation box
will be ``root_nx * boxsize``, ``root_ny * boxsize`` and ``root_nz * boxsize``.
By default there will be exactly one root box in each direction. | [
"Initialize",
"the",
"simulation",
"box",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1455-L1472 | train | 199,680 |
hannorein/rebound | rebound/simulation.py | Simulation.configure_ghostboxes | def configure_ghostboxes(self, nghostx=0, nghosty=0, nghostz=0):
"""
Initialize the ghost boxes.
This function only needs to be called it boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes).
"""
clibrebound.nghostx = c_int(nghostx)
clibrebound.nghosty = c_int(nghosty)
clibrebound.nghostz = c_int(nghostz)
return | python | def configure_ghostboxes(self, nghostx=0, nghosty=0, nghostz=0):
"""
Initialize the ghost boxes.
This function only needs to be called it boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes).
"""
clibrebound.nghostx = c_int(nghostx)
clibrebound.nghosty = c_int(nghosty)
clibrebound.nghostz = c_int(nghostz)
return | [
"def",
"configure_ghostboxes",
"(",
"self",
",",
"nghostx",
"=",
"0",
",",
"nghosty",
"=",
"0",
",",
"nghostz",
"=",
"0",
")",
":",
"clibrebound",
".",
"nghostx",
"=",
"c_int",
"(",
"nghostx",
")",
"clibrebound",
".",
"nghosty",
"=",
"c_int",
"(",
"ngh... | Initialize the ghost boxes.
This function only needs to be called it boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes). | [
"Initialize",
"the",
"ghost",
"boxes",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1474-L1490 | train | 199,681 |
hannorein/rebound | rebound/simulation.py | Simulation.save | def save(self, filename):
"""
Save the entire REBOUND simulation to a binary file.
"""
clibrebound.reb_output_binary(byref(self), c_char_p(filename.encode("ascii"))) | python | def save(self, filename):
"""
Save the entire REBOUND simulation to a binary file.
"""
clibrebound.reb_output_binary(byref(self), c_char_p(filename.encode("ascii"))) | [
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"clibrebound",
".",
"reb_output_binary",
"(",
"byref",
"(",
"self",
")",
",",
"c_char_p",
"(",
"filename",
".",
"encode",
"(",
"\"ascii\"",
")",
")",
")"
] | Save the entire REBOUND simulation to a binary file. | [
"Save",
"the",
"entire",
"REBOUND",
"simulation",
"to",
"a",
"binary",
"file",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1494-L1498 | train | 199,682 |
hannorein/rebound | rebound/simulation.py | Variation.particles | def particles(self):
"""
Access the variational particles corresponding to this set of variational equations.
The function returns a list of particles which are sorted in the same way as those in
sim.particles
The particles are pointers and thus can be modified.
If there are N real particles, this function will also return a list of N particles (all of which are
variational particles).
"""
sim = self._sim.contents
ps = []
if self.testparticle>=0:
N = 1
else:
N = sim.N-sim.N_var
ParticleList = Particle*N
ps = ParticleList.from_address(ctypes.addressof(sim._particles.contents)+self.index*ctypes.sizeof(Particle))
return ps | python | def particles(self):
"""
Access the variational particles corresponding to this set of variational equations.
The function returns a list of particles which are sorted in the same way as those in
sim.particles
The particles are pointers and thus can be modified.
If there are N real particles, this function will also return a list of N particles (all of which are
variational particles).
"""
sim = self._sim.contents
ps = []
if self.testparticle>=0:
N = 1
else:
N = sim.N-sim.N_var
ParticleList = Particle*N
ps = ParticleList.from_address(ctypes.addressof(sim._particles.contents)+self.index*ctypes.sizeof(Particle))
return ps | [
"def",
"particles",
"(",
"self",
")",
":",
"sim",
"=",
"self",
".",
"_sim",
".",
"contents",
"ps",
"=",
"[",
"]",
"if",
"self",
".",
"testparticle",
">=",
"0",
":",
"N",
"=",
"1",
"else",
":",
"N",
"=",
"sim",
".",
"N",
"-",
"sim",
".",
"N_va... | Access the variational particles corresponding to this set of variational equations.
The function returns a list of particles which are sorted in the same way as those in
sim.particles
The particles are pointers and thus can be modified.
If there are N real particles, this function will also return a list of N particles (all of which are
variational particles). | [
"Access",
"the",
"variational",
"particles",
"corresponding",
"to",
"this",
"set",
"of",
"variational",
"equations",
"."
] | bb0f814c98e629401acaab657cae2304b0e003f7 | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1670-L1691 | train | 199,683 |
AltSchool/dynamic-rest | dynamic_rest/links.py | merge_link_object | def merge_link_object(serializer, data, instance):
"""Add a 'links' attribute to the data that maps field names to URLs.
NOTE: This is the format that Ember Data supports, but alternative
implementations are possible to support other formats.
"""
link_object = {}
if not getattr(instance, 'pk', None):
# If instance doesn't have a `pk` field, we'll assume it doesn't
# have a canonical resource URL to hang a link off of.
# This generally only affectes Ephemeral Objects.
return data
link_fields = serializer.get_link_fields()
for name, field in six.iteritems(link_fields):
# For included fields, omit link if there's no data.
if name in data and not data[name]:
continue
link = getattr(field, 'link', None)
if link is None:
base_url = ''
if settings.ENABLE_HOST_RELATIVE_LINKS:
# if the resource isn't registered, this will default back to
# using resource-relative urls for links.
base_url = DynamicRouter.get_canonical_path(
serializer.get_resource_key(),
instance.pk
) or ''
link = '%s%s/' % (base_url, name)
# Default to DREST-generated relation endpoints.
elif callable(link):
link = link(name, field, data, instance)
link_object[name] = link
if link_object:
data['links'] = link_object
return data | python | def merge_link_object(serializer, data, instance):
"""Add a 'links' attribute to the data that maps field names to URLs.
NOTE: This is the format that Ember Data supports, but alternative
implementations are possible to support other formats.
"""
link_object = {}
if not getattr(instance, 'pk', None):
# If instance doesn't have a `pk` field, we'll assume it doesn't
# have a canonical resource URL to hang a link off of.
# This generally only affectes Ephemeral Objects.
return data
link_fields = serializer.get_link_fields()
for name, field in six.iteritems(link_fields):
# For included fields, omit link if there's no data.
if name in data and not data[name]:
continue
link = getattr(field, 'link', None)
if link is None:
base_url = ''
if settings.ENABLE_HOST_RELATIVE_LINKS:
# if the resource isn't registered, this will default back to
# using resource-relative urls for links.
base_url = DynamicRouter.get_canonical_path(
serializer.get_resource_key(),
instance.pk
) or ''
link = '%s%s/' % (base_url, name)
# Default to DREST-generated relation endpoints.
elif callable(link):
link = link(name, field, data, instance)
link_object[name] = link
if link_object:
data['links'] = link_object
return data | [
"def",
"merge_link_object",
"(",
"serializer",
",",
"data",
",",
"instance",
")",
":",
"link_object",
"=",
"{",
"}",
"if",
"not",
"getattr",
"(",
"instance",
",",
"'pk'",
",",
"None",
")",
":",
"# If instance doesn't have a `pk` field, we'll assume it doesn't",
"#... | Add a 'links' attribute to the data that maps field names to URLs.
NOTE: This is the format that Ember Data supports, but alternative
implementations are possible to support other formats. | [
"Add",
"a",
"links",
"attribute",
"to",
"the",
"data",
"that",
"maps",
"field",
"names",
"to",
"URLs",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/links.py#L8-L48 | train | 199,684 |
AltSchool/dynamic-rest | dynamic_rest/processors.py | register_post_processor | def register_post_processor(func):
"""
Register a post processor function to be run as the final step in
serialization. The data passed in will already have gone through the
sideloading processor.
Usage:
@register_post_processor
def my_post_processor(data):
# do stuff with `data`
return data
"""
global POST_PROCESSORS
key = func.__name__
POST_PROCESSORS[key] = func
return func | python | def register_post_processor(func):
"""
Register a post processor function to be run as the final step in
serialization. The data passed in will already have gone through the
sideloading processor.
Usage:
@register_post_processor
def my_post_processor(data):
# do stuff with `data`
return data
"""
global POST_PROCESSORS
key = func.__name__
POST_PROCESSORS[key] = func
return func | [
"def",
"register_post_processor",
"(",
"func",
")",
":",
"global",
"POST_PROCESSORS",
"key",
"=",
"func",
".",
"__name__",
"POST_PROCESSORS",
"[",
"key",
"]",
"=",
"func",
"return",
"func"
] | Register a post processor function to be run as the final step in
serialization. The data passed in will already have gone through the
sideloading processor.
Usage:
@register_post_processor
def my_post_processor(data):
# do stuff with `data`
return data | [
"Register",
"a",
"post",
"processor",
"function",
"to",
"be",
"run",
"as",
"the",
"final",
"step",
"in",
"serialization",
".",
"The",
"data",
"passed",
"in",
"will",
"already",
"have",
"gone",
"through",
"the",
"sideloading",
"processor",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/processors.py#L15-L32 | train | 199,685 |
AltSchool/dynamic-rest | dynamic_rest/processors.py | SideloadingProcessor.process | def process(self, obj, parent=None, parent_key=None, depth=0):
"""Recursively process the data for sideloading.
Converts the nested representation into a sideloaded representation.
"""
if isinstance(obj, list):
for key, o in enumerate(obj):
# traverse into lists of objects
self.process(o, parent=obj, parent_key=key, depth=depth)
elif isinstance(obj, dict):
dynamic = self.is_dynamic(obj)
returned = isinstance(obj, ReturnDict)
if dynamic or returned:
# recursively check all fields
for key, o in six.iteritems(obj):
if isinstance(o, list) or isinstance(o, dict):
# lists or dicts indicate a relation
self.process(
o,
parent=obj,
parent_key=key,
depth=depth +
1
)
if not dynamic or getattr(obj, 'embed', False):
return
serializer = obj.serializer
name = serializer.get_plural_name()
instance = getattr(obj, 'instance', serializer.instance)
instance_pk = instance.pk if instance else None
pk = getattr(obj, 'pk_value', instance_pk) or instance_pk
# For polymorphic relations, `pk` can be a dict, so use the
# string representation (dict isn't hashable).
pk_key = repr(pk)
# sideloading
seen = True
# if this object has not yet been seen
if pk_key not in self.seen[name]:
seen = False
self.seen[name].add(pk_key)
# prevent sideloading the primary objects
if depth == 0:
return
# TODO: spec out the exact behavior for secondary instances of
# the primary resource
# if the primary resource is embedded, add it to a prefixed key
if name == self.plural_name:
name = '%s%s' % (
settings.ADDITIONAL_PRIMARY_RESOURCE_PREFIX,
name
)
if not seen:
# allocate a top-level key in the data for this resource
# type
if name not in self.data:
self.data[name] = []
# move the object into a new top-level bucket
# and mark it as seen
self.data[name].append(obj)
else:
# obj sideloaded, but maybe with other fields
for o in self.data.get(name, []):
if o.instance.pk == pk:
o.update(obj)
break
# replace the object with a reference
if parent is not None and parent_key is not None:
parent[parent_key] = pk | python | def process(self, obj, parent=None, parent_key=None, depth=0):
"""Recursively process the data for sideloading.
Converts the nested representation into a sideloaded representation.
"""
if isinstance(obj, list):
for key, o in enumerate(obj):
# traverse into lists of objects
self.process(o, parent=obj, parent_key=key, depth=depth)
elif isinstance(obj, dict):
dynamic = self.is_dynamic(obj)
returned = isinstance(obj, ReturnDict)
if dynamic or returned:
# recursively check all fields
for key, o in six.iteritems(obj):
if isinstance(o, list) or isinstance(o, dict):
# lists or dicts indicate a relation
self.process(
o,
parent=obj,
parent_key=key,
depth=depth +
1
)
if not dynamic or getattr(obj, 'embed', False):
return
serializer = obj.serializer
name = serializer.get_plural_name()
instance = getattr(obj, 'instance', serializer.instance)
instance_pk = instance.pk if instance else None
pk = getattr(obj, 'pk_value', instance_pk) or instance_pk
# For polymorphic relations, `pk` can be a dict, so use the
# string representation (dict isn't hashable).
pk_key = repr(pk)
# sideloading
seen = True
# if this object has not yet been seen
if pk_key not in self.seen[name]:
seen = False
self.seen[name].add(pk_key)
# prevent sideloading the primary objects
if depth == 0:
return
# TODO: spec out the exact behavior for secondary instances of
# the primary resource
# if the primary resource is embedded, add it to a prefixed key
if name == self.plural_name:
name = '%s%s' % (
settings.ADDITIONAL_PRIMARY_RESOURCE_PREFIX,
name
)
if not seen:
# allocate a top-level key in the data for this resource
# type
if name not in self.data:
self.data[name] = []
# move the object into a new top-level bucket
# and mark it as seen
self.data[name].append(obj)
else:
# obj sideloaded, but maybe with other fields
for o in self.data.get(name, []):
if o.instance.pk == pk:
o.update(obj)
break
# replace the object with a reference
if parent is not None and parent_key is not None:
parent[parent_key] = pk | [
"def",
"process",
"(",
"self",
",",
"obj",
",",
"parent",
"=",
"None",
",",
"parent_key",
"=",
"None",
",",
"depth",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"for",
"key",
",",
"o",
"in",
"enumerate",
"(",
"obj",... | Recursively process the data for sideloading.
Converts the nested representation into a sideloaded representation. | [
"Recursively",
"process",
"the",
"data",
"for",
"sideloading",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/processors.py#L85-L162 | train | 199,686 |
AltSchool/dynamic-rest | dynamic_rest/fields/common.py | WithRelationalFieldMixin._get_request_fields_from_parent | def _get_request_fields_from_parent(self):
"""Get request fields from the parent serializer."""
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) | python | def _get_request_fields_from_parent(self):
"""Get request fields from the parent serializer."""
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) | [
"def",
"_get_request_fields_from_parent",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"parent",
":",
"return",
"None",
"if",
"not",
"getattr",
"(",
"self",
".",
"parent",
",",
"'request_fields'",
")",
":",
"return",
"None",
"if",
"not",
"isinstance",
... | Get request fields from the parent serializer. | [
"Get",
"request",
"fields",
"from",
"the",
"parent",
"serializer",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/common.py#L6-L17 | train | 199,687 |
AltSchool/dynamic-rest | dynamic_rest/metadata.py | DynamicMetadata.determine_metadata | def determine_metadata(self, request, view):
"""Adds `properties` and `features` to the metadata response."""
metadata = super(
DynamicMetadata,
self).determine_metadata(
request,
view)
metadata['features'] = getattr(view, 'features', [])
if hasattr(view, 'get_serializer'):
serializer = view.get_serializer(dynamic=False)
if hasattr(serializer, 'get_name'):
metadata['resource_name'] = serializer.get_name()
if hasattr(serializer, 'get_plural_name'):
metadata['resource_name_plural'] = serializer.get_plural_name()
metadata['properties'] = self.get_serializer_info(serializer)
return metadata | python | def determine_metadata(self, request, view):
"""Adds `properties` and `features` to the metadata response."""
metadata = super(
DynamicMetadata,
self).determine_metadata(
request,
view)
metadata['features'] = getattr(view, 'features', [])
if hasattr(view, 'get_serializer'):
serializer = view.get_serializer(dynamic=False)
if hasattr(serializer, 'get_name'):
metadata['resource_name'] = serializer.get_name()
if hasattr(serializer, 'get_plural_name'):
metadata['resource_name_plural'] = serializer.get_plural_name()
metadata['properties'] = self.get_serializer_info(serializer)
return metadata | [
"def",
"determine_metadata",
"(",
"self",
",",
"request",
",",
"view",
")",
":",
"metadata",
"=",
"super",
"(",
"DynamicMetadata",
",",
"self",
")",
".",
"determine_metadata",
"(",
"request",
",",
"view",
")",
"metadata",
"[",
"'features'",
"]",
"=",
"geta... | Adds `properties` and `features` to the metadata response. | [
"Adds",
"properties",
"and",
"features",
"to",
"the",
"metadata",
"response",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/metadata.py#L22-L37 | train | 199,688 |
AltSchool/dynamic-rest | dynamic_rest/metadata.py | DynamicMetadata.get_field_info | def get_field_info(self, field):
"""Adds `related_to` and `nullable` to the metadata response."""
field_info = OrderedDict()
for attr in ('required', 'read_only', 'default', 'label'):
field_info[attr] = getattr(field, attr)
if field_info['default'] is empty:
field_info['default'] = None
if hasattr(field, 'immutable'):
field_info['immutable'] = field.immutable
field_info['nullable'] = field.allow_null
if hasattr(field, 'choices'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
many = False
if isinstance(field, DynamicRelationField):
field = field.serializer
if isinstance(field, ListSerializer):
field = field.child
many = True
if isinstance(field, ModelSerializer):
type = 'many' if many else 'one'
field_info['related_to'] = field.get_plural_name()
else:
type = self.label_lookup[field]
field_info['type'] = type
return field_info | python | def get_field_info(self, field):
"""Adds `related_to` and `nullable` to the metadata response."""
field_info = OrderedDict()
for attr in ('required', 'read_only', 'default', 'label'):
field_info[attr] = getattr(field, attr)
if field_info['default'] is empty:
field_info['default'] = None
if hasattr(field, 'immutable'):
field_info['immutable'] = field.immutable
field_info['nullable'] = field.allow_null
if hasattr(field, 'choices'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
many = False
if isinstance(field, DynamicRelationField):
field = field.serializer
if isinstance(field, ListSerializer):
field = field.child
many = True
if isinstance(field, ModelSerializer):
type = 'many' if many else 'one'
field_info['related_to'] = field.get_plural_name()
else:
type = self.label_lookup[field]
field_info['type'] = type
return field_info | [
"def",
"get_field_info",
"(",
"self",
",",
"field",
")",
":",
"field_info",
"=",
"OrderedDict",
"(",
")",
"for",
"attr",
"in",
"(",
"'required'",
",",
"'read_only'",
",",
"'default'",
",",
"'label'",
")",
":",
"field_info",
"[",
"attr",
"]",
"=",
"getatt... | Adds `related_to` and `nullable` to the metadata response. | [
"Adds",
"related_to",
"and",
"nullable",
"to",
"the",
"metadata",
"response",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/metadata.py#L39-L70 | train | 199,689 |
AltSchool/dynamic-rest | dynamic_rest/meta.py | get_model_field | def get_model_field(model, field_name):
"""Return a field given a model and field name.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
A Django field if `field_name` is a valid field for `model`,
None otherwise.
"""
meta = model._meta
try:
if DJANGO19:
field = meta.get_field(field_name)
else:
field = meta.get_field_by_name(field_name)[0]
return field
except:
if DJANGO19:
related_objs = (
f for f in meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
)
related_m2m_objs = (
f for f in meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
)
else:
related_objs = meta.get_all_related_objects()
related_m2m_objs = meta.get_all_related_many_to_many_objects()
related_objects = {
o.get_accessor_name(): o
for o in chain(related_objs, related_m2m_objs)
}
if field_name in related_objects:
return related_objects[field_name]
else:
# check virtual fields (1.7)
if hasattr(meta, 'virtual_fields'):
for field in meta.virtual_fields:
if field.name == field_name:
return field
raise AttributeError(
'%s is not a valid field for %s' % (field_name, model)
) | python | def get_model_field(model, field_name):
"""Return a field given a model and field name.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
A Django field if `field_name` is a valid field for `model`,
None otherwise.
"""
meta = model._meta
try:
if DJANGO19:
field = meta.get_field(field_name)
else:
field = meta.get_field_by_name(field_name)[0]
return field
except:
if DJANGO19:
related_objs = (
f for f in meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
)
related_m2m_objs = (
f for f in meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
)
else:
related_objs = meta.get_all_related_objects()
related_m2m_objs = meta.get_all_related_many_to_many_objects()
related_objects = {
o.get_accessor_name(): o
for o in chain(related_objs, related_m2m_objs)
}
if field_name in related_objects:
return related_objects[field_name]
else:
# check virtual fields (1.7)
if hasattr(meta, 'virtual_fields'):
for field in meta.virtual_fields:
if field.name == field_name:
return field
raise AttributeError(
'%s is not a valid field for %s' % (field_name, model)
) | [
"def",
"get_model_field",
"(",
"model",
",",
"field_name",
")",
":",
"meta",
"=",
"model",
".",
"_meta",
"try",
":",
"if",
"DJANGO19",
":",
"field",
"=",
"meta",
".",
"get_field",
"(",
"field_name",
")",
"else",
":",
"field",
"=",
"meta",
".",
"get_fie... | Return a field given a model and field name.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
A Django field if `field_name` is a valid field for `model`,
None otherwise. | [
"Return",
"a",
"field",
"given",
"a",
"model",
"and",
"field",
"name",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/meta.py#L36-L84 | train | 199,690 |
AltSchool/dynamic-rest | dynamic_rest/meta.py | is_field_remote | def is_field_remote(model, field_name):
"""Check whether a given model field is a remote field.
A remote field is the inverse of a one-to-many or a
many-to-many relationship.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
True if `field_name` is a remote field, False otherwise.
"""
if not hasattr(model, '_meta'):
# ephemeral model with no metaclass
return False
model_field = get_model_field(model, field_name)
return isinstance(model_field, (ManyToManyField, RelatedObject)) | python | def is_field_remote(model, field_name):
"""Check whether a given model field is a remote field.
A remote field is the inverse of a one-to-many or a
many-to-many relationship.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
True if `field_name` is a remote field, False otherwise.
"""
if not hasattr(model, '_meta'):
# ephemeral model with no metaclass
return False
model_field = get_model_field(model, field_name)
return isinstance(model_field, (ManyToManyField, RelatedObject)) | [
"def",
"is_field_remote",
"(",
"model",
",",
"field_name",
")",
":",
"if",
"not",
"hasattr",
"(",
"model",
",",
"'_meta'",
")",
":",
"# ephemeral model with no metaclass",
"return",
"False",
"model_field",
"=",
"get_model_field",
"(",
"model",
",",
"field_name",
... | Check whether a given model field is a remote field.
A remote field is the inverse of a one-to-many or a
many-to-many relationship.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
True if `field_name` is a remote field, False otherwise. | [
"Check",
"whether",
"a",
"given",
"model",
"field",
"is",
"a",
"remote",
"field",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/meta.py#L117-L135 | train | 199,691 |
AltSchool/dynamic-rest | dynamic_rest/bases.py | resettable_cached_property | def resettable_cached_property(func):
"""Decorator to add cached computed properties to an object.
Similar to Django's `cached_property` decorator, except stores
all the data under a single well-known key so that it can easily
be blown away.
"""
def wrapper(self):
if not hasattr(self, '_resettable_cached_properties'):
self._resettable_cached_properties = {}
if func.__name__ not in self._resettable_cached_properties:
self._resettable_cached_properties[func.__name__] = func(self)
return self._resettable_cached_properties[func.__name__]
# Returns a property whose getter is the 'wrapper' function
return property(wrapper) | python | def resettable_cached_property(func):
"""Decorator to add cached computed properties to an object.
Similar to Django's `cached_property` decorator, except stores
all the data under a single well-known key so that it can easily
be blown away.
"""
def wrapper(self):
if not hasattr(self, '_resettable_cached_properties'):
self._resettable_cached_properties = {}
if func.__name__ not in self._resettable_cached_properties:
self._resettable_cached_properties[func.__name__] = func(self)
return self._resettable_cached_properties[func.__name__]
# Returns a property whose getter is the 'wrapper' function
return property(wrapper) | [
"def",
"resettable_cached_property",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_resettable_cached_properties'",
")",
":",
"self",
".",
"_resettable_cached_properties",
"=",
"{",
"}",
"if",
"func... | Decorator to add cached computed properties to an object.
Similar to Django's `cached_property` decorator, except stores
all the data under a single well-known key so that it can easily
be blown away. | [
"Decorator",
"to",
"add",
"cached",
"computed",
"properties",
"to",
"an",
"object",
".",
"Similar",
"to",
"Django",
"s",
"cached_property",
"decorator",
"except",
"stores",
"all",
"the",
"data",
"under",
"a",
"single",
"well",
"-",
"known",
"key",
"so",
"tha... | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/bases.py#L10-L25 | train | 199,692 |
AltSchool/dynamic-rest | dynamic_rest/conf.py | Settings._settings_changed | def _settings_changed(self, *args, **kwargs):
"""Handle changes to core settings."""
setting, value = kwargs['setting'], kwargs['value']
if setting == self.name:
self._reload(value) | python | def _settings_changed(self, *args, **kwargs):
"""Handle changes to core settings."""
setting, value = kwargs['setting'], kwargs['value']
if setting == self.name:
self._reload(value) | [
"def",
"_settings_changed",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"setting",
",",
"value",
"=",
"kwargs",
"[",
"'setting'",
"]",
",",
"kwargs",
"[",
"'value'",
"]",
"if",
"setting",
"==",
"self",
".",
"name",
":",
"self",
... | Handle changes to core settings. | [
"Handle",
"changes",
"to",
"core",
"settings",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/conf.py#L134-L138 | train | 199,693 |
AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField.bind | def bind(self, *args, **kwargs):
"""Bind to the parent serializer."""
if self.bound: # Prevent double-binding
return
super(DynamicRelationField, self).bind(*args, **kwargs)
self.bound = True
parent_model = getattr(self.parent.Meta, 'model', None)
remote = is_field_remote(parent_model, self.source)
try:
model_field = get_model_field(parent_model, self.source)
except:
# model field may not be available for m2o fields with no
# related_name
model_field = None
# Infer `required` and `allow_null`
if 'required' not in self.kwargs and (
remote or (
model_field and (
model_field.has_default() or model_field.null
)
)
):
self.required = False
if 'allow_null' not in self.kwargs and getattr(
model_field, 'null', False
):
self.allow_null = True
self.model_field = model_field | python | def bind(self, *args, **kwargs):
"""Bind to the parent serializer."""
if self.bound: # Prevent double-binding
return
super(DynamicRelationField, self).bind(*args, **kwargs)
self.bound = True
parent_model = getattr(self.parent.Meta, 'model', None)
remote = is_field_remote(parent_model, self.source)
try:
model_field = get_model_field(parent_model, self.source)
except:
# model field may not be available for m2o fields with no
# related_name
model_field = None
# Infer `required` and `allow_null`
if 'required' not in self.kwargs and (
remote or (
model_field and (
model_field.has_default() or model_field.null
)
)
):
self.required = False
if 'allow_null' not in self.kwargs and getattr(
model_field, 'null', False
):
self.allow_null = True
self.model_field = model_field | [
"def",
"bind",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"bound",
":",
"# Prevent double-binding",
"return",
"super",
"(",
"DynamicRelationField",
",",
"self",
")",
".",
"bind",
"(",
"*",
"args",
",",
"*",
"... | Bind to the parent serializer. | [
"Bind",
"to",
"the",
"parent",
"serializer",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L127-L158 | train | 199,694 |
AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField._inherit_parent_kwargs | def _inherit_parent_kwargs(self, kwargs):
"""Extract any necessary attributes from parent serializer to
propagate down to child serializer.
"""
if not self.parent or not self._is_dynamic:
return kwargs
if 'request_fields' not in kwargs:
# If 'request_fields' isn't explicitly set, pull it from the
# parent serializer.
request_fields = self._get_request_fields_from_parent()
if request_fields is None:
# Default to 'id_only' for nested serializers.
request_fields = True
kwargs['request_fields'] = request_fields
if self.embed and kwargs.get('request_fields') is True:
# If 'embed' then make sure we fetch the full object.
kwargs['request_fields'] = {}
if hasattr(self.parent, 'sideloading'):
kwargs['sideloading'] = self.parent.sideloading
if hasattr(self.parent, 'debug'):
kwargs['debug'] = self.parent.debug
return kwargs | python | def _inherit_parent_kwargs(self, kwargs):
"""Extract any necessary attributes from parent serializer to
propagate down to child serializer.
"""
if not self.parent or not self._is_dynamic:
return kwargs
if 'request_fields' not in kwargs:
# If 'request_fields' isn't explicitly set, pull it from the
# parent serializer.
request_fields = self._get_request_fields_from_parent()
if request_fields is None:
# Default to 'id_only' for nested serializers.
request_fields = True
kwargs['request_fields'] = request_fields
if self.embed and kwargs.get('request_fields') is True:
# If 'embed' then make sure we fetch the full object.
kwargs['request_fields'] = {}
if hasattr(self.parent, 'sideloading'):
kwargs['sideloading'] = self.parent.sideloading
if hasattr(self.parent, 'debug'):
kwargs['debug'] = self.parent.debug
return kwargs | [
"def",
"_inherit_parent_kwargs",
"(",
"self",
",",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"parent",
"or",
"not",
"self",
".",
"_is_dynamic",
":",
"return",
"kwargs",
"if",
"'request_fields'",
"not",
"in",
"kwargs",
":",
"# If 'request_fields' isn't explic... | Extract any necessary attributes from parent serializer to
propagate down to child serializer. | [
"Extract",
"any",
"necessary",
"attributes",
"from",
"parent",
"serializer",
"to",
"propagate",
"down",
"to",
"child",
"serializer",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L212-L239 | train | 199,695 |
AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField.get_serializer | def get_serializer(self, *args, **kwargs):
"""Get an instance of the child serializer."""
init_args = {
k: v for k, v in six.iteritems(self.kwargs)
if k in self.SERIALIZER_KWARGS
}
kwargs = self._inherit_parent_kwargs(kwargs)
init_args.update(kwargs)
if self.embed and self._is_dynamic:
init_args['embed'] = True
return self._get_cached_serializer(args, init_args) | python | def get_serializer(self, *args, **kwargs):
"""Get an instance of the child serializer."""
init_args = {
k: v for k, v in six.iteritems(self.kwargs)
if k in self.SERIALIZER_KWARGS
}
kwargs = self._inherit_parent_kwargs(kwargs)
init_args.update(kwargs)
if self.embed and self._is_dynamic:
init_args['embed'] = True
return self._get_cached_serializer(args, init_args) | [
"def",
"get_serializer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"init_args",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"kwargs",
")",
"if",
"k",
"in",
"self",
".",... | Get an instance of the child serializer. | [
"Get",
"an",
"instance",
"of",
"the",
"child",
"serializer",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L241-L254 | train | 199,696 |
AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField.to_representation | def to_representation(self, instance):
"""Represent the relationship, either as an ID or object."""
serializer = self.serializer
model = serializer.get_model()
source = self.source
if not self.kwargs['many'] and serializer.id_only():
# attempt to optimize by reading the related ID directly
# from the current instance rather than from the related object
source_id = '%s_id' % source
# try the faster way first:
if hasattr(instance, source_id):
return getattr(instance, source_id)
elif model is not None:
# this is probably a one-to-one field, or a reverse related
# lookup, so let's look it up the slow way and let the
# serializer handle the id dereferencing
try:
instance = getattr(instance, source)
except model.DoesNotExist:
instance = None
# dereference ephemeral objects
if model is None:
instance = getattr(instance, source)
if instance is None:
return None
return serializer.to_representation(instance) | python | def to_representation(self, instance):
"""Represent the relationship, either as an ID or object."""
serializer = self.serializer
model = serializer.get_model()
source = self.source
if not self.kwargs['many'] and serializer.id_only():
# attempt to optimize by reading the related ID directly
# from the current instance rather than from the related object
source_id = '%s_id' % source
# try the faster way first:
if hasattr(instance, source_id):
return getattr(instance, source_id)
elif model is not None:
# this is probably a one-to-one field, or a reverse related
# lookup, so let's look it up the slow way and let the
# serializer handle the id dereferencing
try:
instance = getattr(instance, source)
except model.DoesNotExist:
instance = None
# dereference ephemeral objects
if model is None:
instance = getattr(instance, source)
if instance is None:
return None
return serializer.to_representation(instance) | [
"def",
"to_representation",
"(",
"self",
",",
"instance",
")",
":",
"serializer",
"=",
"self",
".",
"serializer",
"model",
"=",
"serializer",
".",
"get_model",
"(",
")",
"source",
"=",
"self",
".",
"source",
"if",
"not",
"self",
".",
"kwargs",
"[",
"'man... | Represent the relationship, either as an ID or object. | [
"Represent",
"the",
"relationship",
"either",
"as",
"an",
"ID",
"or",
"object",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L286-L315 | train | 199,697 |
AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField.to_internal_value_single | def to_internal_value_single(self, data, serializer):
"""Return the underlying object, given the serialized form."""
related_model = serializer.Meta.model
if isinstance(data, related_model):
return data
try:
instance = related_model.objects.get(pk=data)
except related_model.DoesNotExist:
raise ValidationError(
"Invalid value for '%s': %s object with ID=%s not found" %
(self.field_name, related_model.__name__, data)
)
return instance | python | def to_internal_value_single(self, data, serializer):
"""Return the underlying object, given the serialized form."""
related_model = serializer.Meta.model
if isinstance(data, related_model):
return data
try:
instance = related_model.objects.get(pk=data)
except related_model.DoesNotExist:
raise ValidationError(
"Invalid value for '%s': %s object with ID=%s not found" %
(self.field_name, related_model.__name__, data)
)
return instance | [
"def",
"to_internal_value_single",
"(",
"self",
",",
"data",
",",
"serializer",
")",
":",
"related_model",
"=",
"serializer",
".",
"Meta",
".",
"model",
"if",
"isinstance",
"(",
"data",
",",
"related_model",
")",
":",
"return",
"data",
"try",
":",
"instance"... | Return the underlying object, given the serialized form. | [
"Return",
"the",
"underlying",
"object",
"given",
"the",
"serialized",
"form",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L317-L329 | train | 199,698 |
AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField.serializer_class | def serializer_class(self):
"""Get the class of the child serializer.
Resolves string imports.
"""
serializer_class = self._serializer_class
if not isinstance(serializer_class, six.string_types):
return serializer_class
parts = serializer_class.split('.')
module_path = '.'.join(parts[:-1])
if not module_path:
if getattr(self, 'parent', None) is None:
raise Exception(
"Can not load serializer '%s'" % serializer_class +
' before binding or without specifying full path')
# try the module of the parent class
module_path = self.parent.__module__
module = importlib.import_module(module_path)
serializer_class = getattr(module, parts[-1])
self._serializer_class = serializer_class
return serializer_class | python | def serializer_class(self):
"""Get the class of the child serializer.
Resolves string imports.
"""
serializer_class = self._serializer_class
if not isinstance(serializer_class, six.string_types):
return serializer_class
parts = serializer_class.split('.')
module_path = '.'.join(parts[:-1])
if not module_path:
if getattr(self, 'parent', None) is None:
raise Exception(
"Can not load serializer '%s'" % serializer_class +
' before binding or without specifying full path')
# try the module of the parent class
module_path = self.parent.__module__
module = importlib.import_module(module_path)
serializer_class = getattr(module, parts[-1])
self._serializer_class = serializer_class
return serializer_class | [
"def",
"serializer_class",
"(",
"self",
")",
":",
"serializer_class",
"=",
"self",
".",
"_serializer_class",
"if",
"not",
"isinstance",
"(",
"serializer_class",
",",
"six",
".",
"string_types",
")",
":",
"return",
"serializer_class",
"parts",
"=",
"serializer_clas... | Get the class of the child serializer.
Resolves string imports. | [
"Get",
"the",
"class",
"of",
"the",
"child",
"serializer",
"."
] | 5b0338c3dd8bc638d60c3bb92645857c5b89c920 | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L346-L370 | train | 199,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.