def slope(self, other):  # hypothetical name; the original def line is missing from this excerpt
    """
    Returns:
        int: Slope between self and other.
    """
    X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y
    Y3 = Y1 - Y2
    X3 = X1 - X2
    return (Y3 * self.inverse(X3)) % self.P
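
# A minimal sketch of the inverse() helper the slope code relies on, assuming
# self.P is a prime modulus (not shown in this excerpt); by Fermat's little
# theorem, x**(P-2) % P is the modular inverse of x mod P for nonzero x.
def inverse(self, x):
    # pow(base, exp, mod) performs fast modular exponentiation
    return pow(x, self.P - 2, self.P)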

def to_jacobian(self):
    """
    Converts this point to a Jacobian representation.

    Returns:
        JacobianPoint: The Jacobian representation.
    """
    if not self:
        return JacobianPoint(X=0, Y=0, Z=0)
    return JacobianPoint(X=self.X, Y=self.Y, Z=1)
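
# For context, a sketch of the reverse mapping, assuming the standard Jacobian
# convention (x, y) = (X / Z**2, Y / Z**3) mod P; the Point constructor and
# the inverse() helper are assumptions, not part of this excerpt.
def to_affine(self):
    z_inv = self.inverse(self.Z)
    x = (self.X * z_inv ** 2) % self.P
    y = (self.Y * z_inv ** 3) % self.P
    return Point(X=x, Y=y)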

def import_model(self, name, path="floyd.db.models"):
    """Imports the model `name` from `path`, returning it from the local
    model cache if it has been loaded before; returns False if the
    import fails."""
    if name in self._model_cache:
        return self._model_cache[name]
    try:
        model = getattr(__import__(path, None, None, [name]), name)
        self._model_cache[name] = model
    except ImportError:
        return False
    return model
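
# The __import__(path, None, None, [name]) idiom above is equivalent to the
# importlib call below; shown as a sketch, with the broader except clause an
# assumption (the original only catches ImportError):
import importlib

def import_model_alt(self, name, path="floyd.db.models"):
    if name in self._model_cache:
        return self._model_cache[name]
    try:
        model = getattr(importlib.import_module(path), name)
    except (ImportError, AttributeError):
        return False
    self._model_cache[name] = model
    return model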

def parse_md(self):
    """Converts the raw Markdown source and sets any metadata found by the
    Markdown 'meta' extension as attributes on this post."""
    post_content = _MARKDOWN.convert(self.raw_src)
    if hasattr(_MARKDOWN, 'Meta'):
        for key in _MARKDOWN.Meta:
            print "\t meta: %s: %s (%s)" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0]))
            if key == 'pubdate':
                setattr(self, key, datetime.datetime.fromtimestamp(float(_MARKDOWN.Meta[key][0])))
            else:
                setattr(self, key, _MARKDOWN.Meta[key][0])
    self.content = post_content
    self.stub = self.__key__
    # set required fields
    # @TODO enforce required fields in the schema rather than here
    if not hasattr(self, 'pubdate'):
        print '\t Notice: setting default pubdate'
        setattr(self, 'pubdate', datetime.datetime.now())
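
# Sketch of the assumed module-level setup: the python-markdown 'meta'
# extension exposes front-matter keys on the converter's Meta attribute as
# lists of strings, which is why [0] is taken above.
import markdown

_MARKDOWN = markdown.Markdown(extensions=['meta'])
# After _MARKDOWN.convert("pubdate: 1300000000\n\nBody..."),
# _MARKDOWN.Meta == {'pubdate': ['1300000000']}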

def filter(self, **kwargs):
    # @TODO refactor with models as dicts
    """Filters the dataset by a field value, e.g.
    Query('Posts').filter(post_type='post')
    """
    # note: only the first keyword argument is honoured
    f_field = kwargs.keys()[0]
    f_value = kwargs[f_field]
    _newset = []
    for m in self._dataset:
        if hasattr(m, f_field) and getattr(m, f_field) == f_value:
            _newset.append(m)
    self._dataset = _newset
    return self
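
# A sketch of how filter could honour every keyword argument instead of just
# the first one (hypothetical variant, not in the original):
def filter_all(self, **kwargs):
    self._dataset = [
        m for m in self._dataset
        if all(getattr(m, f, None) == v for f, v in kwargs.items())
    ]
    return self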

def sort_by(self, sb):
    """Sorts results, newest first."""
    # use sorted() rather than list.sort(), which returns None; the sb
    # argument is accepted but the key is currently hard-coded to pubdate
    self._dataset = sorted(self._dataset, key=lambda x: x.pubdate, reverse=True)
    return self
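
# A sketch of a variant that actually sorts on the sb argument as an
# attribute name (hypothetical, not in the original):
def sort_by_field(self, sb, reverse=True):
    self._dataset = sorted(self._dataset, key=lambda x: getattr(x, sb), reverse=reverse)
    return self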

def execute_train_task_with_dependencies(self, task_cls, **kwargs):
    """
    Run the training, as well as any dependencies of the training.
    task_cls - class of a task
    """
    log.info("Task {0}".format(get_task_name(task_cls)))
    # Instantiate the task
    task_inst = task_cls()
    # Fill in any arguments not passed in with defaults from the task instance
    for arg in task_inst.args:
        if arg not in kwargs:
            kwargs[arg] = task_inst.args[arg]
    # Check for dependencies defined by the task
    if hasattr(task_inst, "dependencies"):
        deps = task_inst.dependencies
        dep_results = []
        # Run the dependencies through recursion (in case of dependencies of dependencies, etc.)
        for dep in deps:
            log.info("Dependency {0}".format(get_task_name(dep)))
            dep_results.append(self.execute_train_task_with_dependencies(dep.cls, **dep.args))
        trained_dependencies = []
        # Add each executed dependency to the trained_dependencies list on the task
        for i in xrange(len(deps)):
            dep = deps[i]
            dep_result = dep_results[i]
            name = dep.name
            namespace = dep.namespace
            category = dep.category
            trained_dependencies.append(TrainedDependency(category=category, namespace=namespace, name=name, inst=dep))
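
# A sketch of the task shape this runner assumes: args supplies default
# keyword arguments, and each dependency carries cls and args for the
# recursive call plus the name/namespace/category fields read above. All
# concrete names here are hypothetical.
class TrainClassifier(object):
    args = {"iterations": 10}
    dependencies = []  # e.g. objects with .cls, .args, .name, .namespace, .category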