text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate_with_serviceaccount(reactor, **kw):
    """
    Create an ``IAgent`` which can issue authenticated requests to a
    particular Kubernetes server using a service account token.

    :param reactor: The reactor with which to configure the resulting agent.

    :param bytes path: The location of the service account directory.  The
        default should work fine for normal use within a container.

    :return IAgent: An agent which will authenticate itself to a particular
        Kubernetes server and which will verify that server or refuse to
        interact with it.
    """
    config = KubeConfig.from_service_account(**kw)
    bearer = u"Bearer {}".format(config.user["token"])
    # Wrap a TLS-verifying Agent so every request carries the bearer token.
    return HeaderInjectingAgent(
        _to_inject=Headers({u"authorization": [bearer]}),
        _agent=Agent(
            reactor,
            contextFactory=https_policy_from_config(config),
        ),
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def first_time_setup(self):
    """First time running Open Sesame?

    Create keyring and an auto-unlock key in default keyring.  Make sure
    these things don't already exist.
    """
    if not self._auto_unlock_key_position():
        # No auto-unlock key yet: generate a fresh password and store it
        # in the default keyring, tagged with this application's name.
        new_secret = password.create_passwords()[0]
        gkr.item_create_sync(self.default_keyring,
                             gkr.ITEM_GENERIC_SECRET,
                             self.keyring,
                             {'application': self.keyring},
                             new_secret,
                             True)
    # Create the application keyring, unlocked by the secret stored above.
    position = self._auto_unlock_key_position()
    info = gkr.item_get_info_sync(self.default_keyring, position)
    gkr.create_sync(self.keyring, info.get_secret())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _auto_unlock_key_position(self):
    """Find the open sesame password in the default keyring.

    :return: the position of the item in the default keyring whose
        ``application`` attribute equals ``"opensesame"``, or ``None``
        when no such item exists.
    """
    found_pos = None
    default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring)
    for pos in default_keyring_ids:
        item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos)
        app = 'application'
        # ``in`` instead of the deprecated dict.has_key() (removed in
        # Python 3; ``in`` works on both Python 2 and 3).
        if app in item_attrs and item_attrs[app] == "opensesame":
            found_pos = pos
            break
    return found_pos
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_position_searchable(self):
    """Return dict mapping each item position to its 'searchable' string."""
    return {
        item_id: gkr.item_get_attributes_sync(self.keyring, item_id)['searchable']
        for item_id in gkr.list_item_ids_sync(self.keyring)
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_exists(self, searchable):
    """Make sure the searchable description doesn't already exist.

    :param searchable: description string to look for.
    :return: position of the matching item, or ``False`` when no item
        carries that description.
    """
    position_searchable = self.get_position_searchable()
    # .items() instead of the Python-2-only .iteritems(); it works on
    # both Python 2 and 3 and iterates identically here.
    for pos, val in position_searchable.items():
        if val == searchable:
            return pos
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_password(self, password, **attrs):
    """Save the new password, save the old password with the date prepended.

    :param password: the new secret to store.
    :param attrs: keyring item attributes; must contain ``searchable``,
        the human-readable description used to locate the item again.
    :return: the position of the newly created keyring item.
    """
    # If an item with this description already exists, archive its secret
    # under "<epoch-seconds>_<searchable>" before replacing it.
    pos_of_match = self._match_exists(attrs['searchable'])
    if pos_of_match:
        old_password = self.get_password(pos_of_match).get_secret()
        gkr.item_delete_sync(self.keyring, pos_of_match)
        desc = str(int(time.time())) + "_" + attrs['searchable']
        # archived copy carries no attributes so it won't match again
        gkr.item_create_sync(self.keyring
                             ,gkr.ITEM_GENERIC_SECRET
                             ,desc
                             ,{}
                             ,old_password
                             ,True)
    desc = attrs['searchable']
    pos = gkr.item_create_sync(self.keyring
                               ,gkr.ITEM_GENERIC_SECRET
                               ,desc
                               ,attrs
                               ,password
                               ,True)
    return pos
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_descriptor_for_idcode(idcode):
    """Use this method to find bsdl descriptions for devices.

    Looks the JTAG idcode up in the local JSON file cache first; on a
    miss, fetches the BSDL description remotely, cross-checks the parsed
    data against a second source, and caches the result for next time.

    The caching on this method drastically lower the execution time when
    there are a lot of bsdl files and more than one device. May move it
    into a metaclass to make it more transparent.

    :param idcode: 32-bit device idcode; the top 4 version bits are masked.
    :return: a JTAGDeviceDescription instance.
    :raises Exception: when the two description sources disagree.
    """
    # mask off the top 4 version bits; they vary per silicon revision
    idcode = idcode&0x0fffffff
    # canonical id string: 4 don't-care chars + 28 binary digits
    id_str = "XXXX"+bin(idcode)[2:].zfill(28)
    descr_file_path = _check_cache_for_idcode(id_str)
    if descr_file_path:
        with open(descr_file_path, 'r') as f:
            dat = json.load(f)
        # only trust cache entries written with the current file format
        if dat.get("_file_version",-1) == JTAGDeviceDescription.version:
            return JTAGDeviceDescription(dat.get('idcode'),
                                         dat.get('name'),
                                         dat.get('ir_length'),
                                         dat.get('instruction_opcodes'),
                                         dat.get('registers'),
                                         dat.get('instruction_register_map'))
    print(" Device detected ("+id_str+"). Fetching missing descriptor...")
    sid = get_sid(id_str)
    details = get_details(sid)
    attribs = decode_bsdl(sid)
    #VERIFYING PARSED DATA FROM 2 SOURCES. MESSY BUT USEFUL.
    instruction_length = 0
    if attribs.get('INSTRUCTION_LENGTH') ==\
       details.get('INSTRUCTION_LENGTH'):
        instruction_length = attribs.get('INSTRUCTION_LENGTH')
    elif attribs.get('INSTRUCTION_LENGTH') and\
         details.get('INSTRUCTION_LENGTH'):
        # both sources report a value but they differ: unrecoverable
        raise Exception("INSTRUCTION_LENGTH can not be determined")
    elif attribs.get('INSTRUCTION_LENGTH'):
        instruction_length = attribs.get('INSTRUCTION_LENGTH')
    else:
        instruction_length = details.get('INSTRUCTION_LENGTH')
    # every instruction listed by 'details' must also appear among the
    # opcodes parsed out of the BSDL file
    for instruction_name in details.get('instructions'):
        if instruction_name not in\
           attribs.get('INSTRUCTION_OPCODE',[]):
            raise Exception("INSTRUCTION_OPCODE sources do not match")
    #print(attribs['IDCODE_REGISTER'])
    descr = JTAGDeviceDescription(attribs['IDCODE_REGISTER'].upper(),
                                  details['name'], instruction_length,
                                  attribs['INSTRUCTION_OPCODE'],
                                  attribs['REGISTERS'],
                                  attribs['INSTRUCTION_TO_REGISTER'])
    #CACHE DESCR AS FILE!
    if not os.path.isdir(base_descr_dir):
        os.makedirs(base_descr_dir)
    descr_file_path = os.path.join(base_descr_dir,
                                   attribs['IDCODE_REGISTER']\
                                   .upper()+'.json')
    with open(descr_file_path, 'w') as f:
        json.dump(descr._dump(), f)
    return descr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_dimensions(self, dataset):
    """ Iterate through semesters, counties and municipalities. """
    yield Dimension(u"school")
    yield Dimension(u"year",
                    datatype="year")
    yield Dimension(u"semester",
                    datatype="academic_term",
                    dialect="swedish")  # HT/VT
    # NOTE(review): datatype="year" on the municipality dimension looks
    # like a copy-paste slip from the "year" dimension above -- confirm
    # whether a region/municipality datatype was intended here.
    yield Dimension(u"municipality",
                    datatype="year",
                    domain="sweden/municipalities")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _merge_configs(configs):
    """
    Merge one or more ``KubeConfig`` objects.

    :param list[KubeConfig] configs: The configurations to merge.

    :return KubeConfig: A single configuration object with the merged
        configuration.
    """
    merged = {
        u"contexts": [],
        u"users": [],
        u"clusters": [],
        u"current-context": None,
    }
    list_keys = (u"contexts", u"users", u"clusters")
    for config in configs:
        doc = config.doc
        # concatenate the list-valued sections from every config
        for key in list_keys:
            if key in doc:
                merged[key].extend(doc[key])
        # first config that declares a current-context wins
        if merged[u"current-context"] is None and u"current-context" in doc:
            merged[u"current-context"] = doc[u"current-context"]
    return KubeConfig(merged)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _merge_configs_from_env(kubeconfigs):
    """
    Merge configuration files from a ``KUBECONFIG`` environment variable.

    :param bytes kubeconfigs: A value like the one given to ``KUBECONFIG``
        to specify multiple configuration files.

    :return KubeConfig: A configuration object which has merged all of the
        configuration from the specified configuration files.  Merging is
        performed according to
        https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#merging-kubeconfig-files
    """
    # split on the platform path separator, dropping empty entries
    paths = [FilePath(piece) for piece in kubeconfigs.split(pathsep) if piece]
    return _merge_configs([KubeConfig.from_file(path.path) for path in paths])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def network_kubernetes_from_context(
        reactor, context=None, path=None, environ=None,
        default_config_path=FilePath(expanduser(u"~/.kube/config")),
):
    """
    Create a new ``IKubernetes`` provider based on a kube config file.

    :param reactor: A Twisted reactor which will be used for I/O and
        scheduling.

    :param unicode context: The name of the kube config context from which
        to load configuration details.  Or, ``None`` to respect the current
        context setting from the configuration.

    :param FilePath path: The location of the kube config file to use.

    :param dict environ: A environment direction in which to look up
        ``KUBECONFIG``.  If ``None``, the real process environment will be
        inspected.  This is used only if ``path`` is ``None``.

    :return IKubernetes: The Kubernetes service described by the named
        context.
    """
    if path is None:
        if environ is None:
            # Only grab the real environment if the caller supplied none.
            from os import environ
        try:
            kubeconfigs = environ[u"KUBECONFIG"]
        except KeyError:
            # No KUBECONFIG set: fall back to the conventional location.
            config = KubeConfig.from_file(default_config_path.path)
        else:
            config = _merge_configs_from_env(kubeconfigs)
    else:
        config = KubeConfig.from_file(path.path)
    if context is None:
        context = config.doc[u"current-context"]
    # Resolve the named context into its cluster and user entries.
    context = config.contexts[context]
    cluster = config.clusters[context[u"cluster"]]
    user = config.users[context[u"user"]]
    if isinstance(cluster[u"server"], bytes):
        base_url = URL.fromText(cluster[u"server"].decode("ascii"))
    else:
        base_url = URL.fromText(cluster[u"server"])
    # Client certificate chain and key plus the cluster CA for mutual TLS.
    [ca_cert] = parse(cluster[u"certificate-authority"].bytes())
    client_chain = parse(user[u"client-certificate"].bytes())
    [client_key] = parse(user[u"client-key"].bytes())
    agent = authenticate_with_certificate_chain(
        reactor, base_url, client_chain, client_key, ca_cert,
    )
    return network_kubernetes(
        base_url=base_url,
        agent=agent,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collection_location(obj):
    """
    Get the URL for the collection of objects like ``obj``.

    :param obj: Either a type representing a Kubernetes object kind or an
        instance of such a type.

    :return tuple[unicode]: Some path segments to stick on to a base URL to
        construct the location of the collection of objects like the one
        given.
    """
    # TODO kind is not part of IObjectLoader and we should really be loading
    # apiVersion off of this object too.
    collection = obj.kind.lower() + u"s"
    prefix = version_to_segments[obj.apiVersion]
    # Only actual objects can carry a namespace; a bare kind/type cannot.
    if IObject.providedBy(obj):
        namespace = obj.metadata.namespace
    else:
        namespace = None
    if namespace is not None:
        # Namespaced objects live under their namespace's path segment.
        return prefix + (u"namespaces", namespace, collection)
    # Un-namespaced objects live at the top-level collection.
    return prefix + (collection,)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def execute(ctx):
    """
    execute story part at the current context and make one step further

    :param ctx: current story context
    :return: cloned story context advanced by one step
    """
    # index of the stack frame this story part belongs to; a callable
    # sub-story may push additional frames above it
    tail_depth = len(ctx.stack()) - 1
    story_part = ctx.get_current_story_part()
    logger.debug('# going to call: {}'.format(story_part.__name__))
    waiting_for = story_part(ctx.message)
    if inspect.iscoroutinefunction(story_part):
        waiting_for = await waiting_for
    logger.debug('# got result {}'.format(waiting_for))
    # story part could run callable story and return its context
    if isinstance(waiting_for, story_context.StoryContext):
        # for such cases is very important to know `tail_depth`
        # because story context from callable story already has
        # few stack items above our tail
        ctx = waiting_for.clone()
        ctx.waiting_for = callable.WaitForReturn()
    else:
        ctx = ctx.clone()
        ctx.waiting_for = waiting_for
    # current data/step of the frame we are advancing
    tail_data = ctx.message['session']['stack'][tail_depth]['data']
    tail_step = ctx.message['session']['stack'][tail_depth]['step']
    if ctx.is_waiting_for_input():
        if isinstance(ctx.waiting_for, callable.EndOfStory):
            # merge the end-of-story payload into the session user data
            if isinstance(ctx.waiting_for.data, dict):
                new_data = {**ctx.get_user_data(), **ctx.waiting_for.data}
            else:
                new_data = ctx.waiting_for.data
            ctx.message = {
                **ctx.message,
                'session': {
                    **ctx.message['session'],
                    'data': new_data,
                },
            }
            tail_step += 1
        elif isinstance(ctx.waiting_for, loop.ScopeMatcher):
            # jumping in a loop
            tail_data = matchers.serialize(ctx.waiting_for)
        elif isinstance(ctx.waiting_for, loop.BreakLoop):
            tail_step += 1
        else:
            # plain validator: serialize it so the next incoming message
            # can be matched against it, then move one step forward
            tail_data = matchers.serialize(
                matchers.get_validator(ctx.waiting_for)
            )
            tail_step += 1
    # write the updated data/step back into the frame at tail_depth,
    # leaving frames above and below untouched
    ctx.message = modify_stack_in_message(ctx.message,
                                          lambda stack: stack[:tail_depth] +
                                          [{
                                              'data': tail_data,
                                              'step': tail_step,
                                              'topic': stack[tail_depth]['topic'],
                                          }] +
                                          stack[tail_depth + 1:])
    logger.debug('# mutated ctx after execute')
    logger.debug(ctx)
    return ctx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iterate_storyline(ctx):
    """ iterate the last storyline from the last visited story part

    Generator: yields one context per remaining step of the compiled
    story line; the caller sends the (possibly mutated) context back in
    to continue iteration.

    :param ctx: current story context
    :return: generator of story contexts
    """
    logger.debug('# start iterate')
    compiled_story = ctx.compiled_story()
    if not compiled_story:
        return
    # resume from the current step and walk to the end of the story line
    for step in range(ctx.current_step(),
                      len(compiled_story.story_line)):
        ctx = ctx.clone()
        tail = ctx.stack_tail()
        # rewrite the top stack item to point at the current step while
        # keeping the tail's data and topic
        ctx.message = modify_stack_in_message(ctx.message,
                                              lambda stack: stack[:-1] + [{
                                                  'data': tail['data'],
                                                  'step': step,
                                                  'topic': tail['topic'],
                                              }])
        logger.debug('# [{}] iterate'.format(step))
        logger.debug(ctx)
        ctx = yield ctx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scope_in(ctx):
    """
    - build new scope on the top of stack
    - and current scope will wait for it result

    :param ctx: current story context
    :return: cloned context with a fresh (empty) stack frame on top
    """
    logger.debug('# scope_in')
    logger.debug(ctx)
    ctx = ctx.clone()
    compiled_story = None
    if not ctx.is_empty_stack():
        compiled_story = ctx.get_child_story()
        logger.debug('# child')
        logger.debug(compiled_story)
        # we match child story loop once by message
        # what should prevent multiple matching by the same message
        ctx.matched = True
        # park the current top frame: it now waits for the child's return
        ctx.message = modify_stack_in_message(ctx.message,
                                              lambda stack: stack[:-1] + [{
                                                  'data': matchers.serialize(callable.WaitForReturn()),
                                                  'step': stack[-1]['step'],
                                                  'topic': stack[-1]['topic']
                                              }])
    try:
        if not compiled_story and ctx.is_scope_level_part():
            compiled_story = ctx.get_current_story_part()
    except story_context.MissedStoryPart:
        pass
    if not compiled_story:
        compiled_story = ctx.compiled_story()
    logger.debug('# [>] going deeper')
    # push an empty frame for the scope we are entering
    ctx.message = modify_stack_in_message(ctx.message,
                                          lambda stack: stack + [
                                              stack_utils.build_empty_stack_item(compiled_story.topic)])
    logger.debug(ctx)
    return ctx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def str2date(self, date_str):
    """ Parse date from string.

    Try the cached default template first; on failure try every known
    template, remembering whichever succeeds as the new default.  When
    many strings share a single pattern, only the first parse is slow.

    If there's no template matches your string, Please go
    https://github.com/MacHu-GWU/rolex-project/issues submit your
    datetime string. I'll update templates ASAP.

    This method is faster than :meth:`dateutil.parser.parse`.

    :param date_str: a string represent a date
    :type date_str: str
    :return: a date object
    :raises ValueError: when no template matches *date_str*.
    """
    # try default date template (fast path)
    try:
        a_datetime = datetime.strptime(
            date_str, self._default_date_template)
        return a_datetime.date()
    except (ValueError, TypeError):
        # narrowed from a bare ``except:``; strptime signals a mismatch
        # with ValueError (TypeError for non-string input)
        pass
    # try every date template
    for template in date_template_list:
        try:
            a_datetime = datetime.strptime(date_str, template)
            # remember the winning template as the new default
            self._default_date_template = template
            return a_datetime.date()
        except (ValueError, TypeError):
            pass
    # raise error
    raise ValueError("Unable to parse date from: %r!" % date_str)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _str2datetime(self, datetime_str):
    """ Parse datetime from string.

    Try the cached default template first; on failure try every known
    template, remembering whichever succeeds as the new default.  When a
    string matches no template at all, fall back to
    :meth:`dateutil.parser.parse` and rebind ``self.str2datetime`` to it
    so subsequent calls skip the template machinery entirely; call
    :meth:`Parser.reset` to restore the default behavior.

    If there's no template matches your string, Please go
    https://github.com/MacHu-GWU/rolex-project/issues submit your
    datetime string. I'll update templates ASAP.

    This method is faster than :meth:`dateutil.parser.parse`.

    :param datetime_str: a string represent a datetime
    :type datetime_str: str
    :return: a datetime object
    """
    # try default datetime template (fast path)
    try:
        a_datetime = datetime.strptime(
            datetime_str, self._default_datetime_template)
        return a_datetime
    except (ValueError, TypeError):
        # narrowed from a bare ``except:``; strptime signals a mismatch
        # with ValueError (TypeError for non-string input)
        pass
    # try every datetime template
    for template in datetime_template_list:
        try:
            a_datetime = datetime.strptime(datetime_str, template)
            # remember the winning template as the new default
            self._default_datetime_template = template
            return a_datetime
        except (ValueError, TypeError):
            pass
    # last resort: delegate to dateutil, and keep doing so from now on
    a_datetime = parse(datetime_str)
    self.str2datetime = parse
    return a_datetime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def define(self):
    """If DFA is empty, create a sink state"""
    if self.states:
        return
    # No states at all: make state 0 a non-final sink that loops back
    # to itself on every symbol of the alphabet.
    for symbol in self.alphabet:
        self.add_arc(0, 0, symbol)
    self[0].final = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_state(self):
    """Adds a new state and returns its id"""
    state_id = len(self.states)
    state = DFAState(state_id)
    self.states.append(state)
    return state_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _epsilon_closure(self, state):
    """
    Returns the epsilon-closure for the state given as input.
    """
    closure = set([state.stateid])
    pending = [state]
    # depth-first walk over epsilon arcs only
    while pending:
        current = pending.pop()
        for arc in current:
            if self.isyms.find(arc.ilabel) != EPSILON:
                continue
            if arc.nextstate in closure:
                continue
            closure.add(arc.nextstate)
            pending.append(self.states[arc.nextstate])
    return closure
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invert(self):
    """Inverts the DFA final states"""
    # flip the final flag on every state
    for state in self.states:
        state.final = not state.final
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_list(self):
    """
    returns a list version of the object, based on its attributes
    """
    # a custom list wins outright
    if hasattr(self, 'cust_list'):
        return self.cust_list
    # run the optional attribute validator first
    if hasattr(self, 'attr_check'):
        self.attr_check()
    builtin_names = set(dir(self.__class__))
    # keep only truthy, instance-specific attributes
    return [name for name in dir(self)
            if name not in builtin_names and getattr(self, name)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_dict(self):
    """
    returns a dict version of the object, based on its attributes
    """
    # a custom dict wins outright
    if hasattr(self, 'cust_dict'):
        return self.cust_dict
    # run the optional attribute validator first
    if hasattr(self, 'attr_check'):
        self.attr_check()
    builtin_names = set(dir(self.__class__))
    # map every instance-specific attribute to its value
    return {name: getattr(self, name)
            for name in dir(self) if name not in builtin_names}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_odict(self):
    """
    returns an odict version of the object, based on its attributes
    """
    # a custom odict wins outright
    if hasattr(self, 'cust_odict'):
        return self.cust_odict
    # run the optional attribute validator first
    if hasattr(self, 'attr_check'):
        self.attr_check()
    result = odict()
    # preserve the declared attribute ordering
    for name in self.attrorder:
        result[name] = getattr(self, name)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_and_parse(url, bodyLines):
    """Takes a url, and returns a dictionary of data with 'bodyLines' lines"""
    return parse(url, fetch_page(url), bodyLines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_rec(source, dest):
    """Copy files between different directories.

    Copy one or more files to an existing directory. This function is
    recursive: if the source is a directory, all its subdirectories are
    created in the destination. Existing files in destination are
    overwritten without any warning.

    Args:
        source (str): File or directory name.
        dest (str): Directory name.

    Raises:
        FileNotFoundError: Destination directory doesn't exist.
    """
    if os.path.isdir(source):
        for child in os.listdir(source):
            child_src = os.path.join(source, child)
            if os.path.isdir(child_src):
                # Mirror the sub-directory in the destination, then recurse.
                new_dest = os.path.join(dest, child)
                os.makedirs(new_dest, exist_ok=True)
                copy_rec(child_src, new_dest)
            else:
                # BUGFIX: previously a directory named after each file was
                # created, so files ended up at dest/child/child; copy the
                # file directly into the current destination instead.
                copy_rec(child_src, dest)
    elif os.path.isfile(source):
        logging.info(' Copy "{}" to "{}"'.format(source, dest))
        shutil.copy(source, dest)
    else:
        # neither file nor directory (e.g. broken symlink): skip it
        logging.info(' Ignoring "{}"'.format(source))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(self):
    """ Builds this object into the desired output information.

    Drives the full build pipeline: prepares environment variables and a
    clean build directory, then runs the optional stages (revision file,
    docs, setup file, executable, zip archive, installer) according to
    the configured ``options()`` flags.

    :raises errors.InvalidBuildPath: when no build path is configured.
    """
    signed = bool(self.options() & Builder.Options.Signed)
    # remove previous build information
    buildpath = self.buildPath()
    if not buildpath:
        raise errors.InvalidBuildPath(buildpath)
    # setup the environment
    for key, value in self.environment().items():
        log.info('SET {0}={1}'.format(key, value))
        os.environ[key] = value
    if os.path.exists(buildpath):
        shutil.rmtree(buildpath)
    # generate the build path for the installer
    os.makedirs(buildpath)
    # create the output path
    outpath = self.outputPath()
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    # copy license information
    src = self.licenseFile()
    if src and os.path.exists(src):
        targ = os.path.join(buildpath, 'license.txt')
        shutil.copyfile(src, targ)
    # generate revision information
    if self.options() & Builder.Options.GenerateRevision:
        self.generateRevision()
    # generate documentation information
    if self.options() & Builder.Options.GenerateDocs:
        self.generateDocumentation(buildpath)
    # generate setup file
    if self.options() & Builder.Options.GenerateSetupFile:
        setuppath = os.path.join(self.sourcePath(), '..')
        egg = (self.options() & Builder.Options.GenerateEgg) != 0
        self.generateSetupFile(setuppath, egg=egg)
    # generate executable information
    if self.options() & Builder.Options.GenerateExecutable:
        # abort the remaining stages when the executable build fails
        if not self.generateExecutable(signed=signed):
            return
    # generate zipfile information
    if self.options() & Builder.Options.GenerateZipFile:
        self.generateZipFile(self.outputPath())
    # generate installer information
    if self.options() & Builder.Options.GenerateInstaller:
        self.generateInstaller(buildpath, signed=signed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateRevision(self):
    """ Generates the revision file for this builder.

    Queries source control for the current revision number and writes it
    as ``__revision__ = <rev>`` into the revision file inside the source
    path.  Silently does nothing when the source path is missing or no
    revision can be determined.
    """
    revpath = self.sourcePath()
    if not os.path.exists(revpath):
        return
    # determine the revision location
    revfile = os.path.join(revpath, self.revisionFilename())
    mode = ''
    # test for svn revision
    try:
        args = ['svn', 'info', revpath]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE)
        mode = 'svn'
    except WindowsError:
        # NOTE(review): WindowsError only exists on Windows, so this
        # builder appears Windows-only; elsewhere a missing binary would
        # surface as OSError/NameError instead -- confirm.
        try:
            args = ['git', 'rev-parse', 'HEAD', revpath]
            proc = subprocess.Popen(args, stdout=subprocess.PIPE)
            mode = 'git'
        except WindowsError:
            return
    # process SVN revision
    rev = None
    if mode == 'svn':
        for line in proc.stdout:
            data = re.match('^Revision: (\d+)', line)
            if data:
                rev = int(data.group(1))
                break
    # NOTE(review): in 'git' mode `rev` is never parsed from the process
    # output, so no revision file is written for git checkouts --
    # confirm whether that is intended.
    if rev is not None:
        try:
            f = open(revfile, 'w')
            f.write('__revision__ = {0}\n'.format(rev))
            f.close()
        except IOError:
            # best-effort: an unwritable revision file is not fatal
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateSetupFile(self, outpath='.', egg=False):
    """ Generates the setup file for this builder.

    Writes ``setup.py`` and ``MANIFEST.in`` into *outpath* (only when
    they do not already exist), generates plugin files for the source
    tree, and optionally builds an egg distribution.

    :param outpath: directory in which to place the generated files.
    :param egg: when True, run ``setup.py bdist_egg`` afterwards.
    """
    outpath = os.path.abspath(outpath)
    outfile = os.path.join(outpath, 'setup.py')
    # options interpolated into the setup.py template
    opts = {
        'name': self.name(),
        'distname': self.distributionName(),
        'version': self.version(),
        'author': self.author(),
        'author_email': self.authorEmail(),
        'keywords': self.keywords(),
        'license': self.license(),
        'brief': self.brief(),
        'description': self.description(),
        'url': self.companyUrl()
    }
    # (removed an unused local ``wrap_dict`` lambda that was never called)
    opts['dependencies'] = ',\n'.join(wrap_str(self.dependencies()))
    opts['classifiers'] = ',\n'.join(wrap_str(self.classifiers()))
    if os.path.isfile(self.sourcePath()):
        basepath = os.path.normpath(os.path.dirname(self.sourcePath()))
    else:
        basepath = os.path.normpath(self.sourcePath())
    self.generatePlugins(basepath)
    # collect every non-Python file extension in the source tree so the
    # manifest includes those data files
    exts = set()
    for root, folders, files in os.walk(basepath):
        for file_ in files:
            _, ext = os.path.splitext(file_)
            if ext not in ('.py', '.pyc', '.pyo'):
                exts.add('*' + ext)
    exts = list(exts)
    text = templ.SETUPFILE.format(**opts)
    # generate the file (never clobber an existing setup.py)
    if not os.path.exists(outfile):
        f = open(outfile, 'w')
        f.write(text)
        f.close()
    # generate the manifest file
    manfile = os.path.join(outpath, 'MANIFEST.in')
    if not os.path.exists(manfile):
        f = open(manfile, 'w')
        f.write('include *.md *.txt *.ini *.cfg *.rst\n')
        f.write('recursive-include {0} {1}\n'.format(self.name(), ' '.join(exts)))
        f.close()
    # generate the egg
    if egg:
        cmd = 'cd {0} && $PYTHON setup.py bdist_egg'.format(outpath)
        cmd = os.path.expandvars(cmd)
        cmdexec(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateZipFile(self, outpath='.'):
    """ Generates the zip file for this builder.

    :param outpath: directory in which to place the generated archive.
    :return: True on success, False when an existing archive could not
        be removed.
    """
    fname = self.installName() + '.zip'
    outfile = os.path.abspath(os.path.join(outpath, fname))
    # clears out the existing archive
    if os.path.exists(outfile):
        try:
            os.remove(outfile)
        except OSError:
            log.warning('Could not remove zipfile: %s', outfile)
            return False
    # use a context manager so the archive is closed (and its central
    # directory flushed) even if an error occurs while adding files
    with zipfile.ZipFile(outfile, 'w') as zfile:
        # zip up all relevant files from the code base
        if os.path.isfile(self.sourcePath()):
            zfile.write(self.sourcePath(), os.path.basename(self.sourcePath()))
        else:
            basepath = os.path.abspath(os.path.join(self.sourcePath(), '..'))
            baselen = len(basepath) + 1
            for root, folders, filenames in os.walk(basepath):
                # ignore hidden folders
                if '.svn' in root or '.git' in root:
                    continue
                # ignore setuptools build info
                part = root[baselen:].split(os.path.sep)[0]
                if part in ('build', 'dist') or part.endswith('.egg-info'):
                    continue
                # include files
                for filename in filenames:
                    ext = os.path.splitext(filename)[1]
                    if ext in self.ignoreFileTypes():
                        continue
                    arcroot = root[baselen:].replace('\\', '/')
                    arcname = os.path.join(arcroot, filename)
                    log.info('Archiving %s...', arcname)
                    zfile.write(os.path.join(root, filename), arcname)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_undefined_step_snippets_should_exist_for_table(context):
    """ Checks if undefined-step snippets are provided.

    EXAMPLE:
        Then undefined-step snippets should exist for:
            | Step |
            | When an undefined step is used |
            | Then another undefined step is used |
    """
    assert context.table, "REQUIRES: table"
    # verify a snippet exists for every step listed in the table
    for row in context.table.rows:
        step_undefined_step_snippet_should_exist_for(context, row["Step"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_undefined_step_snippets_should_not_exist_for_table(context):
    """ Checks if undefined-step snippets are not provided.

    EXAMPLE:
        Then undefined-step snippets should not exist for:
            | Step |
            | When an known step is used |
            | Then another known step is used |
    """
    assert context.table, "REQUIRES: table"
    # verify no snippet exists for any step listed in the table
    for row in context.table.rows:
        step_undefined_step_snippet_should_not_exist_for(context, row["Step"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mixin(cls):
    """ A decorator which adds event methods to a class giving it the
    ability to bind to and trigger events

    :param cls: the class to add the event logic to
    :type cls: class
    :return: the modified class
    :rtype: class
    """
    cls._events = {}
    # copy the plain functions off Pyevent's methods onto the target class
    for method_name in ('bind', 'unbind', 'trigger'):
        setattr(cls, method_name, getattr(Pyevent, method_name).__func__)
    return cls
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_options(paths, fname_def=None):
    """Builds a configuration reader function

    :param paths: path components (below this module's directory) of the
        directory holding the ``*.cfg`` / ``*.cfg_sample`` files.
    :param fname_def: default config-file basename for the returned reader.
    :return: a ``reader_func(fname, sect, sett, default)`` closure.
    """
    def reader_func(fname=fname_def, sect=None, sett=None, default=None):
        """Reads the configuration for trump

        Reads every ``*.cfg`` (and, as fallback, ``*.cfg_sample``) file
        in the config directory and returns the value at the requested
        file/section/setting path; ``default`` is returned when the path
        is missing and a default was supplied.
        """
        cur_dir = os.path.dirname(os.path.realpath(__file__))
        config_dir = os.path.join(cur_dir, *paths)
        # (basename, filename) pairs for real and sample config files
        config_files = [(f[:-4], f)
                        for f in os.listdir(config_dir) if f[-4:] == ".cfg"]
        sample_files = [(f[:-11], f)
                        for f in os.listdir(config_dir) if f[-11:] == ".cfg_sample"]
        if fname:
            config_files = [f for f in config_files if f[0] == fname]
            sample_files = [f for f in sample_files if f[0] == fname]
        config_files = dict(config_files)
        sample_files = dict(sample_files)
        # real .cfg files override .cfg_sample files of the same basename
        cfg_files = sample_files
        for fn, f in config_files.iteritems():
            cfg_files[fn] = f
        sample_files_exposed = []
        confg = {}
        for src, fil in cfg_files.iteritems():
            confg[src] = {}
            cfpr = ConfigParser.ConfigParser()
            cfpr.read(os.path.join(config_dir, fil))
            for sec in cfpr.sections():
                confg[src][sec] = dict(cfpr.items(sec))
            if ".cfg_sample" in fil:
                # still relying on a sample file -- warn the user below
                sample_files_exposed.append(fil)
        if len(sample_files_exposed) > 0:
            msg = ", ".join(sample_files_exposed)
            body = "{} sample configuration files have been exposed. " \
                   "Rename *.cfg_sample to *.cfg, and populate the " \
                   "correct settings in the config and settings " \
                   "directories to avoid this warning."
            msg = body.format(msg)
            warnings.warn(msg)
        # build the key path used to drill into the nested config dict
        keys = []
        if fname:
            keys.append(fname)
        if sect:
            keys.append(sect)
        if sett:
            keys.append(sett)
        try:
            return get_from_nested(keys, confg)
        except KeyError:
            # fall back to the caller-provided default, if any
            if default is not None:
                return default
            else:
                raise
    return reader_func
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def returnLabelState(peptide, labelDescriptor, labelSymbols=None, labelAminoacids=None):
    """Calculate the label state of a given peptide for the label setup
    described in labelDescriptor.

    :param peptide: peptide whose label state should be calculated
    :param labelDescriptor: :class:`LabelDescriptor`, describes the label
        setup of an experiment.
    :param labelSymbols: modifications that show a label, as returned by
        :func:`modSymbolsFromLabelInfo`.
    :param labelAminoacids: amino acids that can bear a label, as returned by
        :func:`modAminoacidsFromLabelInfo`.

    :returns: integer that shows the label state:
        >=0: predicted label state of the peptide
         -1: peptide sequence can't bear any labelState modifications
         -2: peptide modifications don't fit to any predicted labelState
         -3: peptide modifications fit to a predicted labelState, but not all
             predicted labelStates are distinguishable
    """
    if labelSymbols is None:
        labelSymbols = modSymbolsFromLabelInfo(labelDescriptor)
    if labelAminoacids is None:
        labelAminoacids = modAminoacidsFromLabelInfo(labelDescriptor)

    sequence = maspy.peptidemethods.removeModifications(peptide)
    modPositions = maspy.peptidemethods.returnModPositions(peptide,
                                                           indexStart=0,
                                                           removeModString=False)
    labelState = None

    # No amino acids in sequence which can bear a label modification
    # Note: at the moment the presence of excluding modifications is ignored
    _validator = lambda seq, aa: (True if seq.find(aa) == -1 else False)
    if all([_validator(sequence, aa) for aa in labelAminoacids]):
        # No terminal label modifications specified by labelDescriptor
        if 'nTerm' not in labelAminoacids and 'cTerm' not in labelAminoacids:
            labelState = -1

    # Check if the peptide modifications fit to any predicted label state
    if labelState is None:
        # group the observed label modifications by sequence position
        peptideLabelPositions = dict()
        for labelSymbol in labelSymbols:
            if labelSymbol in viewkeys(modPositions):
                for sequencePosition in modPositions[labelSymbol]:
                    peptideLabelPositions.setdefault(sequencePosition, list())
                    peptideLabelPositions[sequencePosition].append(labelSymbol)
        # sort for order-independent comparison with the predictions below
        for sequencePosition in list(viewkeys(peptideLabelPositions)):
            peptideLabelPositions[sequencePosition] = \
                sorted(peptideLabelPositions[sequencePosition])

        predictedLabelStates = dict()
        for predictedLabelState, labelStateInfo in viewitems(labelDescriptor.labels):
            expectedLabelMods = expectedLabelPosition(peptide, labelStateInfo,
                                                      sequence=sequence,
                                                      modPositions=modPositions)
            predictedLabelStates[predictedLabelState] = expectedLabelMods
            if peptideLabelPositions == expectedLabelMods:
                # If another expectedLabel state has already been matched, then
                # there is an ambiguity between label states ...
                # NOTE(review): a later match silently overwrites an earlier
                # one; only the distinguishability check below guards this.
                labelState = predictedLabelState

        if labelState is None:
            # Peptide modifications don't fit to any predicted label state
            labelState = -2
        elif labelState != -1:
            # Check if all predicted label states are distinguishable.
            # Assumes the label-state keys are 0..n-1, as produced by
            # LabelDescriptor.addLabel -- TODO confirm
            _comb = set(itertools.combinations(range(len(predictedLabelStates)), 2))
            for state1, state2 in _comb:
                if predictedLabelStates[state1] == predictedLabelStates[state2]:
                    labelState = -3
                    break
    return labelState
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modSymbolsFromLabelInfo(labelDescriptor):
    """Collect every modification symbol used anywhere in the label setup.

    :param labelDescriptor: :class:`LabelDescriptor` describing the label
        setup of an experiment
    :returns: set of modification symbol strings (empty strings excluded)
    """
    return {symbol
            for stateEntry in viewvalues(labelDescriptor.labels)
            for positionEntry in viewvalues(stateEntry['aminoAcidLabels'])
            for symbol in aux.toList(positionEntry)
            if symbol != ''}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modAminoacidsFromLabelInfo(labelDescriptor):
    """Collect every amino acid / terminus that can carry a label.

    :param labelDescriptor: :class:`LabelDescriptor` describing the label
        setup of an experiment
    :returns: set of amino acid one-letter codes and/or 'nTerm' / 'cTerm'
    """
    return {residue
            for stateEntry in viewvalues(labelDescriptor.labels)
            for positionKey in viewkeys(stateEntry['aminoAcidLabels'])
            for residue in aux.toList(positionKey)
            if residue != ''}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expectedLabelPosition(peptide, labelStateInfo, sequence=None, modPositions=None):
    """Return a modification description of a certain label state of a peptide.

    :param peptide: peptide sequence used to calculate the expected label
        state modifications
    :param labelStateInfo: an entry of :attr:`LabelDescriptor.labels` that
        describes a label state
    :param sequence: unmodified amino acid sequence of :var:`peptide`, if None
        it is generated by :func:`maspy.peptidemethods.removeModifications()`
    :param modPositions: dictionary describing the modification state of
        "peptide", if None it is generated by
        :func:`maspy.peptidemethods.returnModPositions()`

    :returns: {sequence position: sorted list of expected label modifications}
    """
    if modPositions is None:
        modPositions = maspy.peptidemethods.returnModPositions(peptide,
                                                               indexStart=0
                                                               )
    if sequence is None:
        sequence = maspy.peptidemethods.removeModifications(peptide)

    currLabelMods = dict()
    # Collect the expected label symbols for each sequence position.
    for labelPosition, labelSymbols in viewitems(labelStateInfo['aminoAcidLabels']):
        labelSymbols = aux.toList(labelSymbols)
        if labelSymbols == ['']:
            # An empty string means "no label expected" at this position.
            pass
        elif labelPosition == 'nTerm':
            # The n-terminal label is mapped to sequence position 0.
            currLabelMods.setdefault(0, list())
            currLabelMods[0].extend(labelSymbols)
        else:
            # Add the labels to every occurrence of this amino acid.
            for sequencePosition in aux.findAllSubstrings(sequence,
                                                          labelPosition):
                currLabelMods.setdefault(sequencePosition, list())
                currLabelMods[sequencePosition].extend(labelSymbols)

    # Remove expected labels that are suppressed by an excluding modification
    # present at the same position.
    if labelStateInfo['excludingModifications'] is not None:
        for excludingMod, excludedLabelSymbol in viewitems(labelStateInfo['excludingModifications']):
            if excludingMod not in modPositions:
                continue
            for excludingModPos in modPositions[excludingMod]:
                if excludingModPos not in currLabelMods:
                    continue
                if excludedLabelSymbol not in currLabelMods[excludingModPos]:
                    continue
                if len(currLabelMods[excludingModPos]) == 1:
                    # Last expected label at this position -> drop the entry.
                    del(currLabelMods[excludingModPos])
                else:
                    # Removes one occurrence of the excluded label symbol per
                    # excluding modification found at this position.
                    excludedModIndex = currLabelMods[excludingModPos].index(excludedLabelSymbol)
                    currLabelMods[excludingModPos].pop(excludedModIndex)

    # Sort for order-independent comparison by callers.
    for sequencePosition in list(viewkeys(currLabelMods)):
        currLabelMods[sequencePosition] = sorted(currLabelMods[sequencePosition])
    return currLabelMods
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addLabel(self, aminoAcidLabels, excludingModifications=None):
    """Register a new label state.

    :param aminoAcidLabels: maps amino acids (one-letter code, 'nTerm' or
        'cTerm') to the expected modification id(s) — a string or a list of
        strings, e.g. ``{'nTerm': 'u:188', 'K': ['u:188', 'u:188']}``.
    :param excludingModifications: optional dict mapping a modification id
        to the label modification id it suppresses, e.g. ``{'u:1': 'u:188'}``.
    """
    if excludingModifications is not None:
        # NOTE(review): attribute name looks misspelled
        # ("excludingModifictions") — kept as-is since other code may read it.
        self.excludingModifictions = True
    self.labels[self._labelCounter] = {
        'aminoAcidLabels': aminoAcidLabels,
        'excludingModifications': excludingModifications,
    }
    self._labelCounter += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_gen_slice(ctx=Bubble(), iterable=[], amount=-1, index=-1):
    """Very crude way of slicing a generator.

    With ``amount > 0``: yields at most ``amount`` items starting at position
    ``index`` (clamped to 0).  Otherwise: yields every item.  Each yielded
    item gets its absolute position written under the ``buts('index')`` key.

    NOTE(review): ``iterable`` defaults to a shared mutable list; safe here
    only because this function never mutates it.
    """
    ctx.gbc.say('get_gen_slice', stuff=iterable, verbosity=10)
    i = -1
    # TODO
    # i = 0 #NATURAL INDEX, this will break all features with exports and -p
    if amount > 0:
        if index < 0:
            index = 0
    else:
        # Not slicing: yield everything, tagging each item with its index.
        for item in iterable:
            i += 1
            item[buts('index')] = i
            ctx.gbc.say('Get gen NO slice:item %d' % i, verbosity=100)
            ctx.gbc.say('Get gen NO slice:a:%d i:%d' %
                        (amount, index), verbosity=100)
            ctx.gbc.say('Get gen NO slice:item', stuff=item, verbosity=1000)
            yield item
    until = index + amount
    if six.PY2:
        sli = xrange(index, until)
    else:
        sli = range(index, until)
    ctx.gbc.say('Get gen slice:range %s' % str(sli), verbosity=1000)
    # TODO: iterable should be empty if not slicing
    # (a generator is exhausted by the loop above; a list would be iterated a
    # second time here, but the then-empty range breaks out immediately via
    # the `i > until` branch below)
    # if valid slice ...
    for item in iterable:
        i += 1
        if i in sli:
            ctx.gbc.say('Get gen slice:item %d' % i, verbosity=100)
            ctx.gbc.say('Get gen slice:a:%d i:%d' %
                        (amount, index), verbosity=100)
            ctx.gbc.say('Get gen slice:item', stuff=item, verbosity=1000)
            item[buts('index')] = i
            yield item
        elif i > until:
            # Past the end of the requested slice: stop consuming the source.
            break
        else:
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_scripts(self, scripts_path_rel, files_deployment, script_type, project_path):
    """Collect scripts of a given type from the configured source folders.

    Returns an empty dict when no folder is configured or nothing is found.
    """
    if not scripts_path_rel:
        self._logger.debug('No {0} folder was specified'.format(script_type))
        return {}
    self._logger.debug('Getting scripts with {0} definitions'.format(script_type))
    collected = pgpm.lib.utils.misc.collect_scripts_from_sources(
        scripts_path_rel, files_deployment, project_path, False, self._logger)
    if not collected:
        self._logger.debug('No {0} definitions were found in {1} folder'
                           .format(script_type, scripts_path_rel))
    return collected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_dependencies(self, cur, dependencies):
    """Check whether dependant packages are installed in the DB.

    :param cur: open database cursor
    :param dependencies: mapping of package name -> required version
    :returns: tuple ``(all_resolved, dep_ids, unresolved)`` where
        ``all_resolved`` is a bool, ``dep_ids`` are the DB ids of the found
        packages and ``unresolved`` is a list of "name: version" strings.
    """
    list_of_deps_ids = []
    _list_of_deps_unresolved = []
    _is_deps_resolved = True
    for k, v in dependencies.items():
        pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name)
        cur.execute("SELECT _find_schema('{0}', '{1}')"
                    .format(k, v))
        # _find_schema returns a composite like "(id,...)"; strip the
        # parentheses and split on commas to extract the fields.
        pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(','))
        try:
            list_of_deps_ids.append(int(pgpm_v_ext[0]))
        except (ValueError, TypeError):
            # First field is empty/non-numeric -> package not installed; the
            # check below records it as unresolved.  (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass
        if not pgpm_v_ext[0]:
            _is_deps_resolved = False
            _list_of_deps_unresolved.append("{0}: {1}".format(k, v))
    return _is_deps_resolved, list_of_deps_ids, _list_of_deps_unresolved
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _reorder_types(self, types_script):
    """Split a SQL script with type definitions and reorder the CREATE
    statements so dependent types are created after their dependencies,
    avoiding "type does not exist" errors.

    :param types_script: SQL text containing type/domain statements
    :returns: tuple ``(drop_scripts, ordered_create_scripts, other_scripts)``

    NOTE(review): the resolution loop below appears to never terminate if
    the type definitions contain a dependency cycle -- TODO confirm.
    """
    self._logger.debug('Running types definitions scripts')
    self._logger.debug('Reordering types definitions scripts to avoid "type does not exist" exceptions')
    _type_statements = sqlparse.split(types_script)
    # TODO: move up to classes
    _type_statements_dict = {}  # dictionary that store statements with type and order.
    type_unordered_scripts = []  # scripts to execute without order
    type_drop_scripts = []  # drop scripts to execute first
    for _type_statement in _type_statements:
        _type_statement_parsed = sqlparse.parse(_type_statement)
        if len(_type_statement_parsed) > 0:  # can be empty parsed object so need to check
            # we need only type declarations to be ordered
            if _type_statement_parsed[0].get_type() == 'CREATE':
                # extract "<schema>.<name>" or "<name>" from CREATE TYPE/DOMAIN
                _type_body_r = r'\bcreate\s+\b(?:type|domain)\s+\b(\w+\.\w+|\w+)\b'
                _type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0]
                _type_statements_dict[str(_type_name)] = \
                    {'script': _type_statement, 'deps': []}
            elif _type_statement_parsed[0].get_type() == 'DROP':
                type_drop_scripts.append(_type_statement)
            else:
                type_unordered_scripts.append(_type_statement)
    # now let's add dependant types to dictionary with types
    # (a type depends on another if that other type's name appears as a
    # whole word inside its CREATE statement)
    # _type_statements_list = []  # list of statements to be ordered
    for _type_key in _type_statements_dict.keys():
        for _type_key_sub, _type_value in _type_statements_dict.items():
            if _type_key != _type_key_sub:
                if pgpm.lib.utils.misc.find_whole_word(_type_key)(_type_value['script']):
                    _type_value['deps'].append(_type_key)
    # now let's add order to type scripts and put them ordered to list:
    # repeatedly emit every type whose dependencies have all been emitted
    # already, until nothing is left unresolved.
    _deps_unresolved = True
    _type_script_order = 0
    _type_names = []
    type_ordered_scripts = []  # ordered list with scripts to execute
    while _deps_unresolved:
        for k, v in _type_statements_dict.items():
            if not v['deps']:
                # no dependencies: can be emitted right away
                _type_names.append(k)
                v['order'] = _type_script_order
                _type_script_order += 1
                if not v['script'] in type_ordered_scripts:
                    type_ordered_scripts.append(v['script'])
            else:
                _dep_exists = True
                for _dep in v['deps']:
                    if _dep not in _type_names:
                        _dep_exists = False
                if _dep_exists:
                    _type_names.append(k)
                    v['order'] = _type_script_order
                    _type_script_order += 1
                    if not v['script'] in type_ordered_scripts:
                        type_ordered_scripts.append(v['script'])
                else:
                    # mark as still unresolved for the outer while loop
                    v['order'] = -1
        _deps_unresolved = False
        for k, v in _type_statements_dict.items():
            if v['order'] == -1:
                _deps_unresolved = True
    return type_drop_scripts, type_ordered_scripts, type_unordered_scripts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_table_links(self):
    """Return the available table names for this EPA dataset.

    Scrapes the dataset's model page and follows its image-map links.
    """
    page = urlopen(self.model_url).read()
    document = lh.fromstring(page)
    hrefs = [area.attrib['href'] for area in document.cssselect('map area')]
    return self._inception_table_links(hrefs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_definition_urls(self, set_of_links):
    """Map each table name to the definition URLs of its columns.

    Non-http links are skipped; the table name is extracted from the
    ``p_table_name`` query parameter of each link.
    """
    definitions = {}
    table_name_pattern = re.compile(r'.*p_table_name=(\w+)&p_topic.*')
    for link in set_of_links:
        if not link.startswith('http://'):
            continue
        columns = {}
        document = lh.fromstring(urlopen(link).read())
        # the last <ul> under #main lists the column definition links
        unordered_list = document.cssselect('#main ul')[-1]
        for list_item in unordered_list.iterchildren():
            anchor = list_item.find('a')
            columns[anchor.text] = anchor.attrib['href']
        table_name = table_name_pattern.sub(r'\1', link).upper()
        definitions[table_name] = columns
    return definitions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_agency(self):
    """Write this agency's table/column definition URLs to '<agency>.txt'."""
    definitions = self.find_definition_urls(self.find_table_links())
    with open(self.agency + '.txt', 'w') as outfile:
        outfile.write(str(definitions))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loop_through_agency(self):
    """Resolve every column's definition for this agency.

    Reads '<agency>.txt' (a dict literal written by `create_agency`
    mapping table -> {column: definition URL}), replaces each URL with
    the scraped definition text and writes the JSON result to
    '<agency>_values.json'.
    """
    import ast  # local import: keeps this fix self-contained
    agency = self.agency
    with open(agency + '.txt') as f:
        # literal_eval instead of eval: the file is a plain dict literal,
        # and eval would execute arbitrary code if the file were tampered with.
        data = ast.literal_eval(f.read())
    for table in data:
        for column in data[table]:
            value_link = data[table][column]
            data[table][column] = self.grab_definition(value_link)
    with open(agency + '_values.json', 'w') as f:
        f.write(str(json.dumps(data)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grab_definition(self, url):
    """Scrape a column definition from an EPA definition page.

    Relative URLs are made absolute first.  When the page cannot be
    parsed, the (absolutized) URL itself is returned as a fallback.
    """
    description_re = re.compile('Description:(.+?\\n)')
    table_name_re = re.compile("(\w+ Table.+)")
    # normalize protocol-relative and site-relative links
    if url.startswith('//'):
        url = 'http:' + url
    elif url.startswith('/'):
        url = 'http://www.epa.gov' + url
    try:
        document = lh.fromstring(urlopen(url).read())
        content = document.cssselect('#main')[0].text_content()
        definition = description_re.search(content).group(1).strip()
    except (AttributeError, IndexError, TypeError, HTTPError):
        print(url)
    else:
        return table_name_re.sub('', definition)
    return url
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(*argv, filesystem=None, do_exit=True, stdout=None, stderr=None):
    """Main entry point for the cli.

    The filesystem and output streams can be overridden for tests; with
    ``do_exit`` False the exit status is returned instead of calling
    ``sys.exit``.  A keyboard interrupt exits quietly.
    """
    try:
        cli = MdCLI()
        cli.filesystem = filesystem
        cli.stdout = stdout or sys.stdout
        cli.stderr = stderr or sys.stderr
        status = cli.main(*argv, loop=LOOP_NEVER)
        if not do_exit:
            return status
        sys.exit(status)
    except KeyboardInterrupt:
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_optparser(self):
    """Extend the base option parser with maildir and verbosity flags."""
    parser = Cmdln.get_optparser(self)
    parser.add_option("-M", "--maildir", action="store", dest="maildir")
    parser.add_option("-V", "--verbose", action="store_true", dest="verbose")
    return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def form(context, form, **kwargs):
    """Render a tapeforms-enabled form.

    Uses the template from the form's ``get_layout_template`` (overridable
    via the ``using`` keyword argument) together with the context from
    ``get_layout_context``.

    :param form: the Django form (or :class:`TapeformFieldset`) to render.
    :return: rendered form (errors + hidden fields + fields) as HTML.
    """
    if not isinstance(form, (forms.BaseForm, TapeformFieldset)):
        raise template.TemplateSyntaxError(
            'Provided form should be a `Form` instance, actual type: {0}'.format(
                form.__class__.__name__))
    template_name = form.get_layout_template(kwargs.get('using', None))
    return render_to_string(template_name, form.get_layout_context())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def formfield(context, bound_field, **kwargs):
    """Render a single field of a tapeforms-enabled form.

    Uses the template from the form's ``get_field_template`` (overridable
    via the ``using`` keyword argument) together with the context from
    ``get_field_context``.

    :param bound_field: the `BoundField` from a Django form to render.
    :return: rendered field (label + widget + other stuff) as HTML.
    """
    if not isinstance(bound_field, forms.BoundField):
        raise template.TemplateSyntaxError(
            'Provided field should be a `BoundField` instance, actual type: {0}'.format(
                bound_field.__class__.__name__))
    owner_form = bound_field.form
    template_name = owner_form.get_field_template(bound_field, kwargs.get('using', None))
    return render_to_string(template_name, owner_form.get_field_context(bound_field))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap_as_node(self, func):
    """Wrap a function as a graph node.

    The wrapped function converts its call arguments into a message,
    calls ``func``, wraps the result(s) with the node's declared fields
    and routes them to all subscribers before returning.
    """
    name = self.get_name(func)

    @wraps(func)
    def wrapped(*args, **kwargs):
        'wrapped version of func'
        message = self.get_message_from_call(*args, **kwargs)
        self.logger.info('calling "%s" with %r', name, message)
        result = func(message)
        # functions can return multiple values ("emit" multiple times)
        # by yielding instead of returning. Handle this case by making
        # a list of the results and processing them all after the
        # generator successfully exits. If we were to process them as
        # they came out of the generator, we might get a partially
        # processed input sent down the graph. This may be possible in
        # the future via a flag.
        if isinstance(result, GeneratorType):
            results = [
                self.wrap_result(name, item)
                for item in result
                if item is not NoResult
            ]
            self.logger.debug(
                '%s returned generator yielding %d items', func, len(results)
            )
            # route each wrapped item; a plain loop instead of a throwaway
            # list comprehension — we only want the side effect here
            for item in results:
                self.route(name, item)
            return tuple(results)
        # the case of a direct return is simpler. wrap, route, and
        # return the value.
        if result is NoResult:
            return result
        result = self.wrap_result(name, result)
        self.logger.debug(
            '%s returned single value %s', func, result
        )
        self.route(name, result)
        return result
    return wrapped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def node(self, fields, subscribe_to=None, entry_point=False, ignore=None,
         **wrapper_options):
    '''\
    Decorate a function to make it a node.

    .. note::
        decorating as a node changes the function signature. Nodes should
        accept a single argument, a :py:class:`emit.message.Message`, or be
        called directly with a dictionary / keyword arguments.

    :param fields: fields that this function returns
    :type fields: ordered iterable of :py:class:`str`
    :param subscribe_to: functions in the graph to subscribe to; these
                         indicators can be regular expressions.
    :type subscribe_to: :py:class:`str` or iterable of :py:class:`str`
    :param ignore: functions in the graph to ignore (also regular
                   expressions) -- useful for carving exceptions out of a
                   broad ``subscribe_to`` pattern.
    :type ignore: :py:class:`str` or iterable of :py:class:`str`
    :param entry_point: set to ``True`` to mark this as an entry point -
                        it will be called when the router is called
                        directly.
    :type entry_point: :py:class:`bool`

    Subclasses of Router may define a ``wrap_node(node, options)`` hook;
    any extra keyword arguments given here are passed through to it.

    :returns: decorator that registers and wraps the function
    '''
    def decorator(func):
        'wrap, optionally custom-wrap, and register the node'
        self.logger.debug('wrapping %s', func)
        wrapped = self.wrap_as_node(func)
        custom_wrapper = getattr(self, 'wrap_node', None)
        if custom_wrapper is not None:
            self.logger.debug('wrapping node "%s" in custom wrapper', wrapped)
            wrapped = custom_wrapper(wrapped, wrapper_options)
        self.register(self.get_name(func), wrapped, fields, subscribe_to,
                      entry_point, ignore)
        return wrapped
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_node_modules(self):
    """Import and cache the node modules specified at init time.

    On ImportError the cache is reset to an empty list (so the import is
    retried on the next call) and the error is re-raised.
    """
    if self.resolved_node_modules:
        return self.resolved_node_modules
    resolved = []
    try:
        for module_name in self.node_modules:
            resolved.append(
                importlib.import_module(module_name, self.node_package))
    except ImportError:
        self.resolved_node_modules = []
        raise
    self.resolved_node_modules = resolved
    return self.resolved_node_modules
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_message_from_call(self, *args, **kwargs):
    '''\
    Build a message object from a node call.

    Accepts either a single positional :py:class:`dict` argument or
    keyword arguments only.

    :raises: :py:exc:`TypeError` for any other calling convention
    '''
    if len(args) == 1 and isinstance(args[0], dict):
        # a single dict positional argument is already a message payload
        self.logger.debug('called with arg dictionary')
        payload = args[0]
    elif not args and kwargs:
        # keyword-only call: the kwargs dict is the payload
        self.logger.debug('called with kwargs')
        payload = kwargs
    else:
        # anything else is unsupported
        self.logger.error(
            'get_message_from_call could not handle "%r", "%r"',
            args, kwargs
        )
        raise TypeError('Pass either keyword arguments or a dictionary argument')
    return self.message_class(payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, name, func, fields, subscribe_to, entry_point, ignore):
    '''
    Record a named function in the graph and wire up its routing.

    :param name: name to register
    :type name: :py:class:`str`
    :param func: function to remember and call
    :type func: callable

    ``fields``, ``subscribe_to``, ``entry_point`` and ``ignore`` have the
    same meaning as in :py:meth:`Router.node`.
    '''
    # remember the function and the fields it emits
    self.functions[name] = func
    self.fields[name] = fields
    # wire routing: subscriptions first, then optional ignores/entry point
    self.register_route(subscribe_to, name)
    if ignore:
        self.register_ignore(ignore, name)
    if entry_point:
        self.add_entry_point(name)
    self.logger.info('registered %s', name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_entry_point(self, destination):
    '''\
    Mark *destination* as an entry point.

    :param destination: node to route to initially
    :type destination: str
    :returns: the (possibly freshly created) set of all entry points
    '''
    entry_points = self.routes.setdefault('__entry_point', set())
    entry_points.add(destination)
    return entry_points
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_route(self, origins, destination):
    '''
    Register *destination* and compile its origin patterns.

    :param origins: a number of origin patterns to register
    :type origins: :py:class:`str` or iterable of :py:class:`str` or None
    :param destination: where the origins should point to
    :type destination: :py:class:`str`

    Patterns are compiled once per destination (``setdefault`` keeps the
    first registration); the route table is then regenerated.  The route
    table takes the form::

        {'node_a': set(['node_b', 'node_c']),
         'node_b': set(['node_d'])}

    :returns: the compiled patterns cached for *destination*
    '''
    self.names.add(destination)
    self.logger.debug('added "%s" to names', destination)
    if not origins:
        origins = []          # treat None / empty as "no origins"
    elif not isinstance(origins, list):
        origins = [origins]   # a single pattern becomes a one-element list
    compiled = [re.compile(pattern) for pattern in origins]
    self.regexes.setdefault(destination, compiled)
    self.regenerate_routes()
    return self.regexes[destination]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_ignore(self, origins, destination):
    '''
    Compile and cache ignore patterns for *destination*.

    :param origins: a number of origin patterns to ignore
    :type origins: :py:class:`str` or iterable of :py:class:`str`
    :param destination: where the origins should NOT point to
    :type destination: :py:class:`str`

    Patterns are compiled once per destination (``setdefault`` keeps the
    first registration) and the route table is regenerated afterwards.

    :returns: the compiled ignore patterns cached for *destination*
    '''
    if not isinstance(origins, list):
        origins = [origins]   # a single pattern becomes a one-element list
    compiled = [re.compile(pattern) for pattern in origins]
    self.ignore_regexes.setdefault(destination, compiled)
    self.regenerate_routes()
    return self.ignore_regexes[destination]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regenerate_routes(self):
    """Rebuild the route table from the registered regexes.

    For each destination, find all registered names matched by its origin
    patterns (excluding itself), then apply the destination's ignore
    patterns: ignored origins have the route removed (or never added),
    every other matching origin is routed to the destination.
    """
    for destination, origins in self.regexes.items():
        # we want only the names that match the destination regexes.
        # NOTE: equality (!=), not identity (is not) — identity comparison
        # only worked while the exact same string objects were reused and
        # could wrongly allow a node to route to itself.
        resolved = [
            name for name in self.names
            if name != destination
            and any(origin.search(name) for origin in origins)
        ]
        ignores = self.ignore_regexes.get(destination, [])
        for origin in resolved:
            destinations = self.routes.setdefault(origin, set())
            if any(ignore.search(origin) for ignore in ignores):
                self.logger.info('ignoring route "%s" -> "%s"', origin, destination)
                try:
                    # drop a previously added route for this pair, if any
                    destinations.remove(destination)
                    self.logger.debug('removed "%s" -> "%s"', origin, destination)
                except KeyError:
                    pass
                continue
            if destination not in destinations:
                self.logger.info('added route "%s" -> "%s"', origin, destination)
                destinations.add(destination)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def route(self, origin, message):
    '''\
    Dispatch *message* to every subscriber of *origin*.

    :param origin: name of the origin node
    :type origin: :py:class:`str`
    :param message: message to dispatch
    :type message: :py:class:`emit.message.Message` or subclass

    No-op when routing is disabled.
    '''
    # side-effect: all routes must be known before routing, but they can't
    # be resolved during __init__ — so resolve lazily, just in time.
    self.resolve_node_modules()
    if not self.routing_enabled:
        return
    for destination in self.routes.get(origin, set()):
        self.logger.debug('routing "%s" -> "%s"', origin, destination)
        self.dispatch(origin, destination, message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dispatch(self, origin, destination, message):
    '''\
    Call the function registered under *destination* with *message*,
    tagging the call with its origin.

    :param destination: destination to dispatch to
    :type destination: :py:class:`str`
    :param message: message to dispatch
    :type message: :py:class:`emit.message.Message` or subclass
    '''
    target = self.functions[destination]
    self.logger.debug('calling %r directly', target)
    return target(_origin=origin, **message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap_result(self, name, result):
    '''
    Zip *result* with the fields declared for *name*.

    :param name: name whose declared fields to use
    :type name: :py:class:`str`
    :param result: return value from a function; a non-tuple is treated
                   as a single value.
    :raises: :py:exc:`ValueError` if *name* has no associated fields
    :returns: :py:class:`dict`
    '''
    values = result if isinstance(result, tuple) else (result,)
    try:
        fields = self.fields[name]
    except KeyError:
        msg = '"%s" has no associated fields'
        self.logger.exception(msg, name)
        raise ValueError(msg % name)
    return dict(zip(fields, values))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_name(self, func):
    '''
    Return the registry name for *func*.

    An explicit ``name`` attribute wins; otherwise fall back to
    "<module>.<function name>".

    :param func: function to get the name of
    :type func: callable
    '''
    try:
        return func.name
    except AttributeError:
        return '%s.%s' % (func.__module__, func.__name__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
""" Testing function for PDA - DFA Diff Operation """ |
if len(argv) < 2:
print 'Usage: '
print ' Get A String %s CFG_fileA FST_fileB' % argv[0]
return
alphabet = createalphabet()
cfgtopda = CfgPDA(alphabet)
print '* Parsing Grammar:',
mma = cfgtopda.yyparse(argv[1])
print 'OK'
flex_a = Flexparser(alphabet)
print '* Parsing Regex:',
mmb = flex_a.yyparse(argv[2])
print mmb
print 'OK'
print '* Minimize Automaton:',
mmb.minimize()
print 'OK'
print mmb
print '* Diff:',
ops = PdaDiff(mma, mmb, alphabet)
mmc = ops.diff()
print 'OK'
print '* Get String:',
print ops.get_string() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _intesect(self):
"""The intesection of a PDA and a DFA""" |
# Cross-product construction: every PDA state is paired with every DFA
# state; the result accepts only words accepted by both machines.
p1automaton = self.mma
p2automaton = self.mmb
p3automaton = PDA(self.alphabet)
self._break_terms()
p1counter = 0
p3counter = 0
p2states = list(p2automaton.states)
print 'PDA States: ' + repr(p1automaton.n)
print 'DFA States: ' + repr(len(list(p2states)))
# Symbols the DFA does not consume: PDA nonterminals, the 0 marker and
# the '@closing' marker — except 'S', which is handled like input.
ignorechars = p1automaton.nonterminals + [0] + ['@closing']
del(ignorechars[ignorechars.index('S')])
while p1counter < p1automaton.n + 1:
    p1state = p1automaton.s[p1counter]
    p2counter = 0
    while p2counter < len(list(p2states)):
        p2state = p2states[p2counter]
        # Product state keyed by the (PDA id, DFA id) pair.
        tempstate = PDAState()
        tempstate.id = (p1state.id, p2state.stateid)
        tempstate.sym = p1state.sym
        tempstate.type = p1state.type
        tempstate.trans = {}
        found = 0
        for char in self.alphabet:
            if char in ignorechars:
                continue
            # DFA has single destination from a state
            p2dest = self._delta(p2automaton, p2state, char)
            # PDA may have multiple destinations from a state
            if p2dest is not None:
                for potential in p1state.trans:
                    if char in p1state.trans[potential]:
                        found = 1
                        p1dest = potential
                        if (p1dest, p2dest.stateid) not in tempstate.trans:
                            tempstate.trans[(p1dest, p2dest.stateid)] = []
                        tempstate.trans[(p1dest, p2dest.stateid)].append(char)
        if found == 0 and p1state.type == 3 and len(p1state.trans) > 0:
            # NOTE(review): this assert can never fire (1==1 is always
            # true); the message suggests 1==0 (or an explicit raise)
            # was intended — confirm upstream.
            assert 1 == 1, 'Check Failed: A READ state with transitions' \
                ' did not participate in the cross product'
        # Nonterminal / marker transitions advance only the PDA side;
        # the DFA component of the pair stays put.
        # NOTE(review): `p2dest` here holds the value from the last loop
        # iteration above — presumably intentional, but verify.
        if p2dest is not None:
            for nonterm in p1automaton.nonterminals + [0] + ['@closing']:
                for potential in p1state.trans:
                    if nonterm in p1state.trans[potential]:
                        p1dest = potential
                        if (p1dest, p2state.stateid) not in tempstate.trans:
                            tempstate.trans[(p1dest, p2state.stateid)] = []
                        tempstate.trans[(p1dest, p2state.stateid)].append(nonterm)
        p3automaton.s[p3counter] = tempstate
        p3counter = p3counter + 1
        p2counter = p2counter + 1
    p1counter = p1counter + 1
p3automaton.n = p3counter - 1
# Accepting product states are those whose DFA component is final
# (TropicalWeight(inf) marks a non-final DFA state).
p3automaton.accepted = []
for state in p2automaton.states:
    if state.final != TropicalWeight(float('inf')):
        p3automaton.accepted.append(state.stateid)
return p3automaton |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff(self):
"""The Difference between a PDA and a DFA""" |
# A \ B is computed as A ∩ complement(B): complement and minimize the
# DFA, then intersect it with the PDA.
self.mmb.complement(self.alphabet)
self.mmb.minimize()
print 'start intersection'
self.mmc = self._intesect()
print 'end intersection'
return self.mmc |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def refresh_devices(self):
'''Queries hub for list of devices, and creates new device objects'''
try:
response = self.api.get("/api/v2/devices", {'properties':'all'})
for device_data in response['DeviceList']:
self.devices.append(Device(device_data, self))
except APIError as e:
print("API error: ")
for key,value in e.data.iteritems:
print(str(key) + ": " + str(value)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def refresh_details(self):
'''Query hub and refresh all details of a device,
but NOT status, includes grouplist not present in
refresh_all_devices'''
try:
return self.api_iface._api_get("/api/v2/devices/" + str(self.device_id))
except APIError as e:
print("API error: ")
for key,value in e.data.iteritems:
print(str(key) + ": " + str(value)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def send_command(self, command):
'''Send a command to a device'''
data = {"command": command, "device_id": self.device_id}
try:
response = self.api_iface._api_post("/api/v2/commands", data)
return Command(response, self)
except APIError as e:
print("API error: ")
for key,value in e.data.iteritems:
print(str(key) + ": " + str(value)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _update_details(self,data):
'''Intakes dict of details, and sets necessary properties
in device'''
# DeviceName, IconID, HouseID, DeviceID always present
self.device_id = data['DeviceID']
self.device_name = data['DeviceName']
self.properties = data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _update_details(self,data):
'''Intakes dict of details, and sets necessary properties
in command'''
for api_name in self._properties:
if api_name in data:
setattr(self, "_" + api_name, data[api_name])
else:
# Only set to blank if not initialized
try:
getattr(self, "_" + api_name)
except AttributeError:
setattr(self, "_" + api_name, '') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def query_status(self):
'''Query the hub for the status of this command'''
try:
data = self.api_iface._api_get(self.link)
self._update_details(data)
except APIError as e:
print("API error: ")
for key,value in e.data.iteritems:
print(str(key) + ": " + str(value)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tracks(self):
""" Tracks list context :return: Tracks list context """ |
if self._tracks is None:
self._tracks = TrackList(self.version, self.id)
return self._tracks |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extension(names):
"""Makes a function to be an extension.""" |
for name in names:
if not NAME_PATTERN.match(name):
raise ValueError('invalid extension name: %s' % name)
def decorator(f, names=names):
return Extension(f, names=names)
return decorator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, extensions):
"""Registers extensions.""" |
for ext in reversed(extensions):
for name in ext.names:
try:
self._extensions[name].appendleft(ext)
except KeyError:
self._extensions[name] = deque([ext]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_extensions(self, value, name, option, format):
"""Evaluates extensions in the registry. If some extension handles the format string, it returns a string. Otherwise, returns ``None``. """ |
try:
exts = self._extensions[name]
except KeyError:
raise ValueError('no suitable extension: %s' % name)
for ext in exts:
rv = ext(self, value, name, option, format)
if rv is not None:
return rv |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetShowDetails(self):
""" Extract show name, season number and episode number from file name. Supports formats S<NUM>E<NUM> or <NUM>x<NUM> for season and episode numbers where letters are case insensitive and number can be one or more digits. It expects season number to be unique however it can handle either single or multipart episodes (consecutive values only). All information preceeding season number is used for the show name lookup. This string is forced to lowercase and stripped of special characters Returns boolean False if an incompatible file name is found, otherwise return True. """ |
# Work on the base file name without its extension.
fileName = os.path.splitext(os.path.basename(self.fileInfo.origPath))[0]
# Episode Number: match S<NUM>E<NUM> / <NUM>x<NUM> style markers,
# allowing multipart runs like E01E02 or 01-02.
episodeNumSubstring = set(re.findall("(?<=[0-9])[xXeE][0-9]+(?:[xXeE_.-][0-9]+)*", fileName))
if len(episodeNumSubstring) != 1:
    goodlogging.Log.Info("TVFILE", "Incompatible filename no episode match detected: {0}".format(self.fileInfo.origPath))
    return False
episodeNumSet = set(re.findall("(?<=[xXeE_.-])[0-9]+", episodeNumSubstring.pop()))
episodeNumList = [int(i) for i in episodeNumSet]
episodeNumList.sort()
# The lowest number is the primary episode.
episodeNum = "{0}".format(episodeNumList[0])
if len(episodeNumList) > 1:
    # Additional numbers are multipart episodes, but only while they
    # stay strictly consecutive.
    episodeNumReference = episodeNumList[0]
    for episodeNumIter in episodeNumList[1:]:
        if episodeNumIter == (episodeNumReference + 1):
            strNum = "{0}".format(episodeNumIter)
            if len(strNum) == 1:
                strNum = "0{0}".format(strNum)
            self.showInfo.multiPartEpisodeNumbers.append(strNum)
            episodeNumReference = episodeNumIter
        else:
            break
# Zero-pad single-digit episode numbers.
if len(episodeNum) == 1:
    episodeNum = "0{0}".format(episodeNum)
self.showInfo.episodeNum = episodeNum
# Season Number: try S<NUM> first, then the <NUM>x<NUM> form.
seasonNumSet = set(re.findall("[sS]([0-9]+)", fileName))
preceedingS = True
if len(seasonNumSet) == 1:
    seasonNum = seasonNumSet.pop()
else:
    seasonNumSet = set(re.findall("([0-9]+)[xX](?:[0-9]+[xX])*", fileName))
    preceedingS = False
    if len(seasonNumSet) == 1:
        seasonNum = seasonNumSet.pop()
    else:
        goodlogging.Log.Info("TVFILE", "Incompatible filename no season match detected: {0}".format(self.fileInfo.origPath))
        return False
if len(seasonNum) == 1:
    seasonNum = "0{0}".format(seasonNum)
self.showInfo.seasonNum = seasonNum
# Show Name: everything before the season/episode marker, lowercased
# and stripped of special characters.
if preceedingS is True:
    showNameList = re.findall("(.+?)\s*[_.-]*\s*[sS][0-9]+[xXeE][0-9]+.*", fileName)
else:
    showNameList = re.findall("(.+?)\s*[_.-]*\s*[0-9]+[xXeE][0-9]+.*", fileName)
if len(showNameList) == 1:
    showName = util.StripSpecialCharacters(showNameList[0].lower(), stripAll=True)
else:
    goodlogging.Log.Info("TVFILE", "Incompatible filename no show name detected: {0}".format(self.fileInfo.origPath))
    return False
self.fileInfo.showName = showName
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GenerateNewFilePath(self, fileDir = None):
""" Create new file path. If a fileDir is provided it will be used otherwise the original file path is used. Updates file info object with new path. Parameters fileDir : string [optional : default = None] Optional file directory """ |
newFileName = self.GenerateNewFileName()
if newFileName is not None:
if fileDir is None:
fileDir = os.path.dirname(self.fileInfo.origPath)
self.fileInfo.newPath = os.path.join(fileDir, newFileName) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Print(self):
""" Print contents of showInfo and FileInfo object """ |
goodlogging.Log.Info("TVFILE", "TV File details are:")
goodlogging.Log.IncreaseIndent()
goodlogging.Log.Info("TVFILE", "Original File Path = {0}".format(self.fileInfo.origPath))
# Prefer the guide-sourced show name over the file-derived one.
if self.showInfo.showName is not None:
    goodlogging.Log.Info("TVFILE", "Show Name (from guide) = {0}".format(self.showInfo.showName))
elif self.fileInfo.showName is not None:
    goodlogging.Log.Info("TVFILE", "Show Name (from file) = {0}".format(self.fileInfo.showName))
# Only print fields that have been populated.
if self.showInfo.seasonNum is not None and self.showInfo.episodeNum is not None:
    goodlogging.Log.Info("TVFILE", "Season & Episode = S{0}E{1}".format(self.showInfo.seasonNum, self.showInfo.episodeNum))
if self.showInfo.episodeName is not None:
    goodlogging.Log.Info("TVFILE", "Episode Name: = {0}".format(self.showInfo.episodeName))
if self.fileInfo.newPath is not None:
    goodlogging.Log.Info("TVFILE", "New File Path = {0}".format(self.fileInfo.newPath))
goodlogging.Log.DecreaseIndent() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connectProcess(connection, processProtocol, commandLine='', env={}, usePTY=None, childFDs=None, *args, **kwargs):
"""Opens a SSHSession channel and connects a ProcessProtocol to it @param connection: the SSH Connection to open the session channel on @param processProtocol: the ProcessProtocol instance to connect to the process @param commandLine: the command line to execute the process @param env: optional environment variables to set for the process @param usePTY: if set, request a PTY for the process @param childFDs: custom child file descriptors for the process """ |
processOpenDeferred = defer.Deferred()
process = SSHProcess(processProtocol, commandLine, env, usePTY, childFDs,
*args, **kwargs)
process.processOpen = processOpenDeferred.callback
process.openFailed = processOpenDeferred.errback
connection.openChannel(process)
return processOpenDeferred |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_builder_init(cls, kb_app, sphinx_app: Sphinx):
""" On builder init event, commit registry and do callbacks """ |
# Find and commit docs project plugins
conf_dir = sphinx_app.confdir
plugins_dir = sphinx_app.config.kaybee_settings.plugins_dir
full_plugins_dir = os.path.join(conf_dir, plugins_dir)
if os.path.exists(full_plugins_dir):
    # Make the docs project importable, then import and scan the
    # plugin package so its registrations are picked up.
    # NOTE(review): this permanently prepends conf_dir to sys.path.
    sys.path.insert(0, conf_dir)
    plugin_package = importlib.import_module(plugins_dir)
    importscan.scan(plugin_package)
else:
    logger.info(f'## Kaybee: No plugin dir at {plugins_dir}')
# Commit all registered dectate actions before dispatching callbacks.
dectate.commit(kb_app)
for callback in cls.get_callbacks(kb_app, SphinxEvent.BI):
    callback(kb_app, sphinx_app) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_purge_doc(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docname: str):
""" On env-purge-doc, do callbacks """ |
for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EPD):
callback(kb_app, sphinx_app, sphinx_env, docname) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_env_before_read_docs(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docnames: List[str]):
""" On env-read-docs, do callbacks""" |
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.EBRD):
callback(kb_app, sphinx_app, sphinx_env, docnames) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_env_doctree_read(cls, kb_app, sphinx_app: Sphinx, doctree: doctree):
""" On doctree-read, do callbacks""" |
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.DREAD):
callback(kb_app, sphinx_app, doctree) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_env_updated(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment):
""" On the env-updated event, do callbacks """ |
for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EU):
callback(kb_app, sphinx_app, sphinx_env) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_html_collect_pages(cls, kb_app, sphinx_app: Sphinx):
""" On html-collect-pages, do callbacks""" |
EventAction.get_callbacks(kb_app,
SphinxEvent.HCP)
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.HCP):
yield callback(kb_app, sphinx_app) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_env_check_consistency(cls, kb_app, builder: StandaloneHTMLBuilder, sphinx_env: BuildEnvironment):
""" On env-check-consistency, do callbacks""" |
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.ECC):
callback(kb_app, builder, sphinx_env) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_layout_template(self, template_name=None):
""" Returns the layout template to use when rendering the form to HTML. Preference of template selection: 1. Provided method argument `template_name` 2. Form class property `layout_template` 3. Globally defined default template from `defaults.LAYOUT_DEFAULT_TEMPLATE` :param template_name: Optional template to use instead of other configurations. :return: Template name to use when rendering the form. """ |
if template_name:
return template_name
if self.layout_template:
return self.layout_template
return defaults.LAYOUT_DEFAULT_TEMPLATE |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_layout_context(self):
""" Returns the context which is used when rendering the form to HTML. The generated template context will contain the following variables: * form: `Form` instance * errors: `ErrorList` instance with non field errors and hidden field errors * hidden_fields: All hidden fields to render. * visible_fields: All visible fields to render. :return: Template context for form rendering. """ |
errors = self.non_field_errors()
for field in self.hidden_fields():
errors.extend(field.errors)
return {
'form': self,
'errors': errors,
'hidden_fields': self.hidden_fields(),
'visible_fields': self.visible_fields(),
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_template(self, bound_field, template_name=None):
Returns the field template to use when rendering a form field to HTML. Preference of template selection: 1. Provided method argument `template_name` 2. Template from `field_template_overrides` selected by field name 3. Template from `field_template_overrides` selected by field class 4. Form class property `field_template` 5. Globally defined default template from `defaults.FIELD_DEFAULT_TEMPLATE` :param bound_field: `BoundField` instance to select a template for. :param template_name: Optional template to use instead of other configurations. :return: Template name to use when rendering the form field. """ |
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_label_css_class(self, bound_field):
""" Returns the optional label CSS class to use when rendering a field template. By default, returns the Form class property `field_label_css_class`. If the field has errors and the Form class property `field_label_invalid_css_class` is defined, its value is appended to the CSS class. :param bound_field: `BoundField` instance to return CSS class for. :return: A CSS class string or `None` """ |
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_context(self, bound_field):
""" Returns the context which is used when rendering a form field to HTML. The generated template context will contain the following variables: * form: `Form` instance * field: `BoundField` instance of the field * field_id: Field ID to use in `<label for="..">` * field_name: Name of the form field to render * errors: `ErrorList` instance with errors of the field * required: Boolean flag to signal if the field is required or not * label: The label text of the field * label_css_class: The optional label CSS class, might be `None` * help_text: Optional help text for the form field. Might be `None` * container_css_class: The CSS class for the field container. * widget_class_name: Lowercased version of the widget class name (e.g. 'textinput') * widget_input_type: `input_type` property of the widget instance, falls back to `widget_class_name` if not available. :return: Template context for field rendering. """ |
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_widget_template(self, field_name):
""" Applies widget template overrides if available. The method uses the `get_widget_template` method to determine if the widget template should be exchanged. If a template is available, the template_name property of the widget instance is updated. :param field_name: A field name of the form. """ |
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_widget_template(self, field_name, field):
""" Returns the optional widget template to use when rendering the widget for a form field. Preference of template selection: 1. Template from `widget_template_overrides` selected by field name 2. Template from `widget_template_overrides` selected by widget class By default, returns `None` which means "use Django's default widget template". :param field_name: The field name to select a widget template for. :param field: `Field` instance to return a widget template. :return: Template name to use when rendering the widget or `None` """ |
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_widget_css_class(self, field_name):
""" Applies CSS classes to widgets if available. The method uses the `get_widget_css_class` method to determine if the widget CSS class should be changed. If a CSS class is returned, it is appended to the current value of the class property of the widget instance. :param field_name: A field name of the form. """ |
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_widget_invalid_options(self, field_name):
""" Applies additional widget options for an invalid field. This method is called when there is some error on a field to apply additional options on its widget. It does the following: * Sets the aria-invalid property of the widget for accessibility. * Adds an invalid CSS class, which is determined by the returned value of `get_widget_invalid_css_class` method. If a CSS class is returned, it is appended to the current value of the class property of the widget. :param field_name: A field name of the form. """ |
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def use_quandl_data(self, authtoken):
""" Use quandl data to build conversion table """ |
dfs = {}
st = self.start.strftime("%Y-%m-%d")
at = authtoken
for pair in self.pairs:
symbol = "".join(pair)
qsym = "CURRFX/{}".format(symbol)
dfs[symbol] = qdl.get(qsym,authtoken=at, trim_start=st)['Rate']
self.build_conversion_table(dfs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_conversion_table(self, dataframes):
""" Build conversion table from a dictionary of dataframes """ |
self.data = pd.DataFrame(dataframes)
tmp_pairs = [s.split("/") for s in self.data.columns]
self.data.columns = pd.MultiIndex.from_tuples(tmp_pairs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.