text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_from(fname, ext=COMPLETE, fmt=HTML):
    """Read a file and run MultiMarkdown conversion on its contents.

    Transclusion is resolved relative to the file's directory.

    Keyword arguments:
    fname -- filename of the document to convert
    ext -- extension bitfield to pass to the conversion process
    fmt -- flag indicating the output format to use
    """
    base_dir = os.path.abspath(os.path.dirname(fname))
    with open(fname, 'r') as handle:
        source = handle.read()
    return convert(source, ext, fmt, base_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def manifest(txt, dname):
    """Return the file manifest for a body of text in the given directory."""
    expansion = _expand_source(txt, dname, HTML)
    # _expand_source returns (expanded_text, files); only the files matter here
    return expansion[1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keys(source, ext=COMPLETE):
    """Extract metadata keys from the provided MultiMarkdown text.

    Keyword arguments:
    source -- string containing MultiMarkdown text
    ext -- extension bitfield for extracting MultiMarkdown
    """
    extract = _MMD_LIB.extract_metadata_keys
    extract.restype = ctypes.c_char_p
    extract.argtypes = [ctypes.c_char_p, ctypes.c_ulong]
    raw = extract(source.encode('utf-8'), ext)
    decoded = raw.decode('utf-8') if raw else ''
    # the library returns one key per line; drop empty entries
    return [key for key in decoded.split('\n') if key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value(source, key, ext=COMPLETE):
    """Extract the value for the given metadata key from MultiMarkdown text.

    Keyword arguments:
    source -- string containing MultiMarkdown text
    key -- key to extract
    ext -- extension bitfield for processing the text
    """
    extract = _MMD_LIB.extract_metadata_value
    extract.restype = ctypes.c_char_p
    extract.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p]
    # note: renamed local so we don't shadow anything confusingly
    result = extract(source.encode('utf-8'), ext, key.encode('utf-8'))
    return result.decode('utf-8') if result else ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tweet(self, text, in_reply_to=None, filename=None, file=None):
    """Post a new tweet.

    :param text: the text to post
    :param in_reply_to: the ID of the tweet to reply to
    :param filename: if `file` is not provided, read the file from this path
    :param file: a file object used instead of opening `filename`; `filename`
        is still required for MIME type detection and as the POST form field
    :return: Tweet object
    """
    if filename is None:
        status = self._client.update_status(
            status=text, in_reply_to_status_id=in_reply_to)
    else:
        status = self._client.update_with_media(
            filename=filename, file=file,
            status=text, in_reply_to_status_id=in_reply_to)
    return Tweet(status._json)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retweet(self, id):
    """Retweet a tweet.

    :param id: ID of the tweet in question
    :return: True if success, False otherwise
    """
    try:
        self._client.retweet(id=id)
    except TweepError as err:
        # a deleted/missing tweet is reported as failure, not an exception
        if err.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR:
            return False
        raise
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tweet(self, id):
    """Get an existing tweet.

    :param id: ID of the tweet in question
    :return: Tweet object, or None if not found
    """
    try:
        status = self._client.get_status(id=id)
    except TweepError as err:
        if err.api_code == TWITTER_TWEET_NOT_FOUND_ERROR:
            return None
        raise
    return Tweet(status._json)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user(self, id):
    """Get a user's info.

    :param id: ID of the user in question
    :return: User object, or None if not found
    """
    try:
        raw = self._client.get_user(user_id=id)
    except TweepError as err:
        if err.api_code == TWITTER_USER_NOT_FOUND_ERROR:
            return None
        raise
    return User(raw._json)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_tweet(self, id):
    """Delete a tweet.

    :param id: ID of the tweet in question
    :return: True if success, False otherwise
    """
    # deleting a missing tweet or another user's tweet is a soft failure
    soft_failures = (TWITTER_PAGE_DOES_NOT_EXISTS_ERROR,
                     TWITTER_DELETE_OTHER_USER_TWEET)
    try:
        self._client.destroy_status(id=id)
    except TweepError as err:
        if err.api_code in soft_failures:
            return False
        raise
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_list(self, name, mode='public', description=None):
    """Create a list.

    :param name: name of the new list
    :param mode: :code:`'public'` (default) or :code:`'private'`
    :param description: description of the new list
    :return: the new list object
    :rtype: :class:`~responsebot.models.List`
    """
    raw = self._client.create_list(name=name, mode=mode, description=description)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy_list(self, list_id):
    """Destroy a list.

    :param list_id: list ID number
    :return: the destroyed list object
    :rtype: :class:`~responsebot.models.List`
    """
    raw = self._client.destroy_list(list_id=list_id)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_list(self, list_id, name=None, mode=None, description=None):
    """Update a list.

    :param list_id: list ID number
    :param name: new name for the list
    :param mode: :code:`'public'` (default) or :code:`'private'`
    :param description: new description of the list
    :return: the updated list object
    :rtype: :class:`~responsebot.models.List`
    """
    raw = self._client.update_list(
        list_id=list_id, name=name, mode=mode, description=description)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_timeline(self, list_id, since_id=None, max_id=None, count=20):
    """List the tweets of the specified list.

    :param list_id: list ID number
    :param since_id: results will have ID greater than specified ID (more recent)
    :param max_id: results will have ID less than specified ID (older)
    :param count: number of results per page
    :return: list of :class:`~responsebot.models.Tweet` objects
    """
    statuses = self._client.list_timeline(
        list_id=list_id, since_id=since_id, max_id=max_id, count=count)
    return [Tweet(status._json) for status in statuses]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, list_id):
    """Get info of the specified list.

    :param list_id: list ID number
    :return: :class:`~responsebot.models.List` object
    """
    raw = self._client.get_list(list_id=list_id)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_list_member(self, list_id, user_id):
    """Add a user to a list.

    :param list_id: list ID number
    :param user_id: user ID number
    :return: :class:`~responsebot.models.List` object
    """
    raw = self._client.add_list_member(list_id=list_id, user_id=user_id)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_list_member(self, list_id, user_id):
    """Remove a user from a list.

    :param list_id: list ID number
    :param user_id: user ID number
    :return: :class:`~responsebot.models.List` object
    """
    raw = self._client.remove_list_member(list_id=list_id, user_id=user_id)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_members(self, list_id):
    """List users in a list.

    :param list_id: list ID number
    :return: list of :class:`~responsebot.models.User` objects
    """
    members = self._client.list_members(list_id=list_id)
    return [User(member._json) for member in members]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_list_member(self, list_id, user_id):
    """Check if a user is a member of a list.

    :param list_id: list ID number
    :param user_id: user ID number
    :return: :code:`True` if the user is a member of the list, :code:`False` otherwise
    """
    try:
        member = self._client.show_list_member(list_id=list_id, user_id=user_id)
    except TweepError as err:
        # Twitter signals non-membership via a dedicated API error code
        if err.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
            return False
        raise
    return bool(member)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subscribe_list(self, list_id):
    """Subscribe to a list.

    :param list_id: list ID number
    :return: :class:`~responsebot.models.List` object
    """
    raw = self._client.subscribe_list(list_id=list_id)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unsubscribe_list(self, list_id):
    """Unsubscribe from a list.

    :param list_id: list ID number
    :return: :class:`~responsebot.models.List` object
    """
    raw = self._client.unsubscribe_list(list_id=list_id)
    return List(tweepy_list_to_json(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_subscribers(self, list_id):
    """List subscribers of a list.

    :param list_id: list ID number
    :return: list of :class:`~responsebot.models.User` objects
    """
    subscribers = self._client.list_subscribers(list_id=list_id)
    return [User(subscriber._json) for subscriber in subscribers]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_subscribed_list(self, list_id, user_id):
    """Check if a user is a subscriber of the specified list.

    :param list_id: list ID number
    :param user_id: user ID number
    :return: :code:`True` if the user subscribes to the list, :code:`False` otherwise
    """
    try:
        subscriber = self._client.show_list_subscriber(
            list_id=list_id, user_id=user_id)
    except TweepError as err:
        # non-subscription is reported via a dedicated API error code
        if err.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
            return False
        raise
    return bool(subscriber)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth(config):
    """Authenticate with Twitter and return a client to communicate with it.

    :param config: ResponseBot config
    :type config: :class:`~responsebot.utils.config_utils.ResponseBotConfig`
    :return: client instance to execute twitter actions
    :rtype: :class:`~responsebot.responsebot_client.ResponseBotClient`
    :raises AuthenticationError: if authentication fails
    :raises APIQuotaError: if the API call rate limit has been reached
    """
    auth = tweepy.OAuthHandler(config.get('consumer_key'), config.get('consumer_secret'))
    auth.set_access_token(config.get('token_key'), config.get('token_secret'))
    api = tweepy.API(auth)
    try:
        api.verify_credentials()
    except RateLimitError as e:
        # NOTE(review): assumes tweepy packs errors as [[{'message': ...}]] -- confirm
        raise APIQuotaError(e.args[0][0]['message'])
    except TweepError as e:
        raise AuthenticationError(e.args[0][0]['message'])
    else:
        # FIX: pass lazy %-style args to logging instead of eager interpolation
        logging.info('Successfully authenticated as %s', api.me().screen_name)
        return ResponseBotClient(config=config, client=api)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
    """Bind the instance (see openbrokerapi documentation).

    :param instance_id: identifier of the provisioned service instance
    :param binding_id: identifier for the new binding
    :param details: binding request details; only ``parameters`` is used here
    :return: the created (or existing) Binding
    """
    # Find the instance
    instance = self._backend.find(instance_id)
    # Find or create the binding
    # NOTE(review): the same backend.find() is used for instances and bindings;
    # presumably it dispatches on the optional second argument -- confirm.
    binding = self._backend.find(binding_id, instance)
    # Create the binding if needed
    return self._backend.bind(binding, details.parameters)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self, path, data=None):
    """Perform a POST request against the API.

    :param path: URL path appended to API_URL
    :param data: dict of payload data, JSON-encoded into the request body
    :return: result of self._check_response (retries via self.post on demand)
    """
    # FIX: mutable default argument ({}) replaced with None sentinel
    payload = {} if data is None else data
    response = requests.post(API_URL + path, data=json.dumps(payload),
                             headers=self._set_headers())
    return self._check_response(response, self.post, path, payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, path, data=None):
    """Perform a DELETE request against the API.

    :param path: URL path appended to API_URL
    :param data: dict of query parameters appended to the path
    :return: result of self._check_response (retries via self.delete on demand)
    """
    # FIX: mutable default argument ({}) replaced with None sentinel
    params = {} if data is None else data
    if params:
        # FIX: join with '&' instead of appending a trailing '&' per pair
        query = '&'.join('{}={}'.format(k, v) for k, v in params.items())
        path += '?' + query
    response = requests.delete(API_URL + path, headers=self._set_headers())
    return self._check_response(response, self.delete, path, params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parsed(self):
    """Get the ConfigParser object which represents the content.

    The parsed result is cached in ``self._parsed`` so the content is
    only parsed once.
    """
    if not self._parsed:
        parsed = ConfigParser()
        # FIX: read_string replaces the deprecated readfp(StringIO(...)) dance
        parsed.read_string(self.content)
        self._parsed = parsed
    return self._parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
    """ Create a html cache. Html string will be automatically compressed.

    :param directory: path for the cache directory.
    :param compress_level: 0 ~ 9, 9 is slowest and smallest.
    :param value_type_is_binary: when True, store values as raw bytes instead
        of text (passed through to the CompressedDisk backend).
    :param kwargs: other arguments forwarded to `diskcache.Cache`.
    :return: a `diskcache.Cache()`
    """
    # CompressedDisk transparently (de)compresses stored values at the given level
    cache = diskcache.Cache(
        directory,
        disk=CompressedDisk,
        disk_compress_level=compress_level,
        disk_value_type_is_binary=value_type_is_binary,
        **kwargs
    )
    return cache
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timeticks(tdiff):
    """Pick (major, minor) time-axis locators for a given time span.

    NOTE: do NOT use "interval" or ticks are misaligned! Use "bysecond" only!
    Returns (None, None) for spans over two hours.
    """
    if isinstance(tdiff, xarray.DataArray):  # len==1
        tdiff = timedelta(seconds=tdiff.values / np.timedelta64(1, 's'))
    assert isinstance(tdiff, timedelta), 'expecting datetime.timedelta'

    if tdiff > timedelta(hours=2):
        return None, None
    if tdiff > timedelta(minutes=20):
        return (MinuteLocator(byminute=range(0, 60, 5)),
                MinuteLocator(byminute=range(0, 60, 2)))
    # chained comparisons replace the original bitwise '&' on booleans
    if timedelta(minutes=10) < tdiff <= timedelta(minutes=20):
        return (MinuteLocator(byminute=range(0, 60, 2)),
                MinuteLocator(byminute=range(0, 60, 1)))
    if timedelta(minutes=5) < tdiff <= timedelta(minutes=10):
        return (MinuteLocator(byminute=range(0, 60, 1)),
                SecondLocator(bysecond=range(0, 60, 30)))
    if timedelta(minutes=1) < tdiff <= timedelta(minutes=5):
        return (SecondLocator(bysecond=range(0, 60, 30)),
                SecondLocator(bysecond=range(0, 60, 10)))
    if timedelta(seconds=30) < tdiff <= timedelta(minutes=1):
        return (SecondLocator(bysecond=range(0, 60, 10)),
                SecondLocator(bysecond=range(0, 60, 2)))
    return (SecondLocator(bysecond=range(0, 60, 2)),
            SecondLocator(bysecond=range(0, 60, 1)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consume(self, msg):
    """Handle an incoming fedmsg and trigger an rpm-ostree compose.

    From here we trigger an rpm-ostree compose by touching a specific file
    under the `touch_dir`. Then our `doRead` method is called with the output
    of the rpm-ostree-toolbox treecompose, which we monitor to determine when
    it has completed.
    """
    self.log.info(msg)
    body = msg['body']
    topic = body['topic']
    repo = None
    if 'rawhide' in topic:
        arch = body['msg']['arch']
        self.log.info('New rawhide %s compose ready', arch)
        repo = 'rawhide'
    elif 'branched' in topic:
        arch = body['msg']['arch']
        branch = body['msg']['branch']
        self.log.info('New %s %s branched compose ready', branch, arch)
        log = body['msg']['log']
        if log != 'done':
            self.log.warn('Compose not done?')
            return
        repo = branch
    elif 'updates.fedora' in topic:
        self.log.info('New Fedora %(release)s %(repo)s compose ready',
                      body['msg'])
        repo = 'f%(release)s-%(repo)s' % body['msg']
    else:
        self.log.warn('Unknown topic: %s', topic)
        # BUG FIX: previously fell through with repo=None, which raised
        # KeyError on self.releases[None]; unknown topics are now ignored.
        return
    release = self.releases[repo]
    reactor.callInThread(self.compose, release)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_addr(text):
    """Parse a 1- to 3-part colon-separated address spec.

    Returns an (interface, host, port) triple; missing leading parts
    are None (or '' for the host in the 1-part form). An empty/None
    input yields (None, None, None).
    """
    if not text:
        return None, None, None
    parts = text.split(':')
    if len(parts) == 3:
        iface, host, port = parts
        return iface, host, int(port)
    if len(parts) == 2:
        host, port = parts
        return None, host, int(port)
    if len(parts) == 1:
        return None, '', int(parts[0])
    return None, None, None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """Start the service: register signals, spawn the periodic flush task,
    then bind a UDP socket and process incoming statsd packets forever.
    """
    # register signals
    gevent.signal(signal.SIGINT, self._shutdown)

    # spawn the flush trigger
    def _flush_impl():
        while 1:
            gevent.sleep(self._stats.interval)
            # rotate stats
            stats = self._stats
            self._reset_stats()
            # send the stats to the sink which in turn broadcasts
            # the stats packet to one or more hosts.
            try:
                self._sink.send(stats)
            except Exception:
                # FIX: 'except Exception, ex' is Python-2-only syntax; the
                # bound name was unused here anyway.
                trace = traceback.format_tb(sys.exc_info()[-1])
                self.error(''.join(trace))

    self._flush_task = gevent.spawn(_flush_impl)

    # start accepting connections
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                               socket.IPPROTO_UDP)
    self._sock.bind(self._bindaddr)
    while 1:
        try:
            data, _ = self._sock.recvfrom(MAX_PACKET)
            # multiple metrics may be batched into one datagram, one per line
            for p in data.split('\n'):
                if p:
                    self._process(p)
        except Exception as ex:
            # FIX: modern 'as' form of the except clause
            self.error(str(ex))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process(self, data):
    "Process a single packet and update the internal tables."
    # Packet shape: "<key>:<value>|<type>[|@<rate>]" (statsd wire format);
    # multiple "<value>|<type>" groups may follow the key.
    parts = data.split(':')
    if self._debug:
        self.error('packet: %r' % data)
    if not parts:
        return
    # interpret the packet and update stats
    stats = self._stats
    # Sanitize the metric key (two-arg str.translate -- Python 2 form)
    key = parts[0].translate(KEY_TABLE, KEY_DELETIONS)
    if self._key_prefix:
        key = '.'.join([self._key_prefix, key])
    for part in parts[1:]:
        srate = 1.0
        fields = part.split('|')
        length = len(fields)
        if length < 2:
            # malformed group (no type field) -- skip silently
            continue
        value = fields[0]
        stype = fields[1].strip()
        # stats_lock guards the shared tables against the concurrent flush task
        with stats_lock:
            # timer (milliseconds)
            if stype == 'ms':
                stats.timers[key].append(float(value if value else 0))
            # counter with optional sample rate
            elif stype == 'c':
                if length == 3 and fields[2].startswith('@'):
                    srate = float(fields[2][1:])
                # scale the count up by the inverse of the sample rate
                value = float(value if value else 1) * (1 / srate)
                stats.counts[key] += value
            # gauge: last reported value wins
            elif stype == 'g':
                value = float(value if value else 1)
                stats.gauges[key] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def section(self, resources):
    """Return the enclosing 'section' parent of this resource, or None."""
    # first parent whose rtype is 'section', scanning in parent order
    for parent in self.parents(resources):
        if parent.rtype == 'section':
            return parent
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_navitem(self, resources, nav_href):
    """Given the href of a nav item, determine if this resource is in it."""
    # The nav href might end with '/index'; strip it so the section index
    # matches everything under its directory.
    href = nav_href[:-6] if nav_href.endswith('/index') else nav_href
    return self.docname.startswith(href)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_published(self):
    """Return True if this resource has a published date in the past."""
    published = self.props.published
    # no published date at all means "not published"
    return bool(published) and published < datetime.now()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_driver(self, **kwargs):
""" Create webdriver, assign it to ``self.driver``, and run webdriver initiation process, which is usually used for manual login. """ |
if self.driver is None:
self.driver = self.create_driver(**kwargs)
self.init_driver_func(self.driver) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deserialize_time(data):
    """Return a ``time`` instance parsed from *data*, preserving any tzinfo."""
    parsed_dt = parser.parse(data)
    # .time() drops tzinfo, so re-attach the parsed timezone explicitly
    return parsed_dt.time().replace(tzinfo=parsed_dt.tzinfo)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def require(*args, **kwargs):
    '''
    Install a set of packages using pip.

    This is designed to be an interface for IPython notebooks that
    replicates the requirements.txt pip format, letting notebooks specify
    which versions of packages they need inside the notebook itself.

    Called with no arguments it returns the current requirements list;
    otherwise each positional argument is a requirement string and each
    keyword maps a package name to a version spec.
    '''
    # If called with no arguments, return the requirements list
    if not (args or kwargs):
        return freeze()
    # Build the combined requirements: positional specs plus name+spec pairs
    requirements = list(args)
    requirements += ['{}{}'.format(name, spec) for name, spec in kwargs.items()]
    pip_args = ['install', '-q'] + requirements
    pip.main(pip_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(self, *args, **options):
    """Compare the current database schema against a freshly-migrated one.

    Creates a temporary database, applies all the migrations to it, dumps
    the schema from both current and temporary databases, diffs them, then
    reports the diff to the user.
    """
    self.db = options.get("database", DEFAULT_DB_ALIAS)
    self.current_name = connections[self.db].settings_dict["NAME"]
    self.compare_name = options.get("db_name")
    self.lines = options.get("lines")
    self.ignore = int(options.get('ignore'))
    if not self.compare_name:
        self.compare_name = "%s_compare" % self.current_name
    command = NASHVEGAS.get("dumpdb", "pg_dump -s {dbname}")

    # FIX: Python-2-only print statements replaced with the function form
    print("Getting schema for current database...")
    current_sql = Popen(
        command.format(dbname=self.current_name),
        shell=True,
        stdout=PIPE
    ).stdout.readlines()

    print("Getting schema for fresh database...")
    self.setup_database()
    connections[self.db].close()
    connections[self.db].settings_dict["NAME"] = self.compare_name
    try:
        call_command("syncdb", interactive=False, verbosity=0, migrations=False)
        # NOTE(review): this dump runs without shell=True while the first one
        # uses shell=True -- inconsistent but preserved as-is.
        new_sql = Popen(
            command.format(dbname=self.compare_name).split(),
            stdout=PIPE
        ).stdout.readlines()
    finally:
        # always restore the original DB name and drop the temp database
        connections[self.db].close()
        connections[self.db].settings_dict["NAME"] = self.current_name
        self.teardown_database()

    print("Outputing diff between the two...")
    print("".join(difflib.unified_diff(normalize_sql(current_sql, self.ignore),
                                       normalize_sql(new_sql, self.ignore),
                                       n=int(self.lines))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_widgets(kb_app: kb, sphinx_app: Sphinx, doctree: doctree, fromdocname: str, ):
    """ Go through docs and replace widget directive with rendering """
    builder: StandaloneHTMLBuilder = sphinx_app.builder
    for node in doctree.traverse(widget):
        # Render the output
        # Each widget node carries the name of a registered widget instance.
        w = sphinx_app.env.widgets.get(node.name)
        # Start from the builder's global template context so widgets see the
        # same variables as regular page templates.
        context = builder.globalcontext.copy()
        # Add in certain globals
        context['resources'] = sphinx_app.env.resources
        context['references'] = sphinx_app.env.references
        output = w.render(sphinx_app, context)
        # Put the output into the node contents
        listing = [nodes.raw('', output, format='html')]
        node.replace_self(listing)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth_string(self):
    """Get the auth string.

    If the token is expired and auto refresh is enabled, a new token will
    be fetched first.

    :return: the auth string
    :rtype: str
    :raises TokenExpired: when the token is expired and auto refresh is off
    """
    if not self._token:
        self.execute()
    if self._token.expired:
        if not self.auto_refresh:
            raise TokenExpired()
        # refresh and fall through to format the (new) token
        self.execute()
    return 'Bearer {}'.format(self._token.access_token)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def admin_penalty(self, column=None, value=None, **kwargs):
    """Query administrative penalty orders.

    An enforcement action that results in levying the permit holder with a
    penalty or fine: judicial hearing dates, penalty amounts, and penalty type.
    """
    table = 'PCS_ADMIN_PENALTY_ORDER'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compliance_schedule(self, column=None, value=None, **kwargs):
    """Query compliance schedules: sequences of activities with milestones
    pertaining to a given permit."""
    table = 'PCS_CMPL_SCHD'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compliance_violation(self, column=None, value=None, **kwargs):
    """Query compliance schedule violations: the non-achievement of a
    compliance schedule event, with violation and resolution types."""
    table = 'PCS_CMPL_SCHD_VIOL'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enforcement_action(self, column=None, value=None, **kwargs):
    """Query enforcement actions: disciplinary actions taken against a
    permit facility, applicable to one or more violations."""
    table = 'PCS_ENFOR_ACTION'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hearing(self, column=None, value=None, **kwargs):
    """Query evidentiary hearing events."""
    table = 'PCS_EVIDENTIARY_HEARING_EVENT'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def industrial_user(self, column=None, value=None, **kwargs):
    """Query PCI_AUDIT information about industrial users, e.g. the number
    of significant industrial users."""
    table = 'PCS_INDUSTRIAL_USER_INFO'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def permit_event(self, column=None, value=None, **kwargs):
    """Query permit events: the lifecycle of a permit from issuance to
    expiration (e.g. 'Application Received', 'Permit Issued')."""
    table = 'PCS_PERMIT_EVENT'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pipe_schedule(self, column=None, value=None, **kwargs):
    """Query pipe schedules: discharge points at a permit facility governed
    by effluent limitations and monitoring/submission requirements."""
    table = 'PCS_PIPE_SCHED'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def single_violation(self, column=None, value=None, **kwargs):
    """Query single event violations: one-time events on a fixed date,
    associated with one permitted facility."""
    table = 'PCS_SINGLE_EVENT_VIOL'
    return self._resolve_call(table, column, value, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def typify(value: Union[dict, list, set, str]):
    """Enhance a block operation with native types.

    Takes a blockchain operation (or dict/list/value), recursively walks it,
    and converts recognized string values -- asset amounts and ISO
    timestamps -- into native data types.
    """
    if type(value) == dict:
        return walk_values(typify, value)
    if type(value) in [list, set]:
        return list(map(typify, value))
    if type(value) == str:
        # FIX: raw strings -- '\d' in a plain literal is an invalid escape
        # sequence (DeprecationWarning, an error in future Python versions).
        if re.match(r'^\d+\.\d+ (STEEM|SBD|VESTS)$', value):
            return keep_in_dict(dict(Amount(value)), ['amount', 'asset'])
        if re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
            return parse_time(value)
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def json_expand(json_op):
    """Expand the embedded 'json' payload of a custom_json op into data."""
    is_custom_json = type(json_op) == dict and 'json' in json_op
    if not is_custom_json:
        return json_op
    return update_in(json_op, ['json'], safe_json_loads)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, subnet_id):
    """Delete a subnet and then its parent VPC.

    NOTE (from original author): this is a blunt delete because one VPC can
    hold more than one subnet; it is only OK when CAL is the sole manager
    of the cloud resource. To be updated ASAP.
    """
    # 1: look up the subnet to learn which VPC owns it
    described = self.client.describe_subnets(SubnetIds=[subnet_id])
    vpc_id = described.get('Subnets')[0].get('VpcId')
    # 2: delete the subnet itself
    self.client.delete_subnet(SubnetId=subnet_id)
    # 3: delete the owning VPC
    return self.client.delete_vpc(VpcId=vpc_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clean_terminals(self):
""" Because of the optimization, there are some non existing terminals on the generated list. Remove them by checking for terms in form Ax,x """ |
new_terminals = []
for term in self.grammar.grammar_terminals:
x_term = term.rfind('@')
y_term = term.rfind('A')
if y_term > x_term:
x_term = y_term
ids = term[x_term + 1:].split(',')
if len(ids) < 2:
"""It'input_string a normal terminal, not a state"""
new_terminals.append(term)
self.grammar.grammar_terminals = new_terminals |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_self_replicate(self, myntr):
    """For each rule B -> c where c is a known terminal, search for B
    occurrences in rules of the form A -> B and set A -> c.

    :param myntr: nonterminal whose resolved value should be propagated
    :returns: the resolved value if a final state was reached, 1 if at
        least one rule was updated, otherwise 0
    """
    # print 'BFS Dictionary Update - Self Replicate'
    find = 0
    for nonterm in self.grammar.grammar_nonterminals_map:
        for i in self.grammar.grammar_nonterminals_map[nonterm]:
            # Only unresolved rules whose right-hand side is exactly the
            # single nonterminal `myntr` (not a tuple/set of symbols).
            if self.grammar.grammar_rules[i][0] not in self.resolved and not isinstance(
                    self.grammar.grammar_rules[i][1], (set, tuple)) \
                    and self.grammar.grammar_rules[i][1] == myntr:
                # Propagate the resolved value from myntr to the LHS.
                self.resolved[self.grammar.grammar_rules[i][0]] = self.resolved[myntr]
                if self._checkfinal(self.grammar.grammar_rules[i][0]):
                    # Reached a final nonterminal: return its value directly.
                    return self.resolved[self.grammar.grammar_rules[i][0]]
                if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
                    # Queue the newly resolved nonterminal for further BFS.
                    self.bfs_queue.append(self.grammar.grammar_rules[i][0])
                find = 1
    if find == 1:
        return 1
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def describe(self):
    """Return a multi-line, human-readable summary of this Symbol."""
    parts = ["Symbol = {}".format(self.name)]
    if self.tags:
        joined_tags = ", ".join(t.tag for t in self.tags)
        parts.append(" tagged = {}".format(joined_tags))
    if self.aliases:
        joined_aliases = ", ".join(a.alias for a in self.aliases)
        parts.append(" aliased = {}".format(joined_aliases))
    if self.feeds:
        parts.append(" feeds:")
        for feed in self.feeds:
            parts.append(" {}. {}".format(feed.fnum, feed.ftype))
    return "\n".join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def datatable_df(self):
    """Return the dataframe representation of the symbol's final data."""
    frame = pd.DataFrame(self._all_datatable_data())
    frame.columns = self.dt_all_cols
    return self._finish_df(frame, 'ALL')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_datatable(self):
    """
    Instantiate the ``.datatable`` attribute, pointing it at the table
    in the database that stores all of this symbol's cached data.

    Reflects an existing table when possible; otherwise builds a fresh
    table via ``_datatable_factory()`` and creates it in the database.
    Sets ``self.datatable_exists`` to True on success either way.
    """
    try:
        self.datatable = Table(self.name, Base.metadata, autoload=True)
    except NoSuchTableError:
        # print() as a function so this also runs on Python 3
        # (the original used the Python-2-only print statement).
        print("Creating datatable, cause it doesn't exist")
        self.datatable = self._datatable_factory()
        self.datatable.create()
    self.datatable_exists = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _datatable_factory(self):
    """
    Build a SQLAlchemy Table object with the appropriate number of
    columns for this symbol: one index column, a 'final' column, and one
    column per feed bracketed by override/failsafe columns.
    """
    cols = ['feed{0:03d}'.format(n + 1) for n in range(self.n_feeds)]
    cols = ['override_feed000'] + cols + ['failsafe_feed999']
    # SQLAlchemy column types derived from the index / data definitions.
    index_type = indexingtypes[self.index.indimp].sqlatyp
    data_type = datadefs[self.dtype.datadef].sqlatyp
    table = Table(self.name, Base.metadata,
                  Column('indx', index_type, primary_key=True),
                  Column('final', data_type),
                  *(Column(name, data_type) for name in cols),
                  extend_existing=True)
    # Cache the column layouts for later dataframe construction.
    self.dt_feed_cols = cols[:]
    self.dt_all_cols = ['indx', 'final'] + cols[:]
    return table
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tags(self, tags):
    """Attach one tag (or a list of tags) to this Feed and persist them."""
    # Normalise a single tag (str/unicode — Python 2 module) into a list.
    if isinstance(tags, (str, unicode)):
        tags = [tags]
    session = object_session(self)
    session.add_all([FeedTag(tag=t, feed=self) for t in tags])
    session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initiate_browser(self):
    """Start a Firefox instance configured for silent file downloads.

    Creates a unique temporary download directory, configures a Firefox
    profile to save spreadsheet downloads there without dialogues, opens
    the statistics site and expands the detailed-options tab.
    (Reconstructed: this body had been garbled into the docstring line.)
    """
    # Create a unique tempdir for downloaded files
    tempdir = os.getenv(TEMPDIR_ENVVAR, DEFAULT_TEMPDIR)
    tempsubdir = uuid4().hex
    # TODO: Remove this directory when finished!
    self.tempdir = os.path.join(tempdir, tempsubdir)
    try:
        # Try and create directory before checking if it exists,
        # to avoid race condition
        os.makedirs(self.tempdir)
    except OSError:
        if not os.path.isdir(self.tempdir):
            raise

    profile = webdriver.FirefoxProfile()
    # Set download location, avoid download dialogues if possible.
    # Different settings needed for different Firefox versions.
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.manager.closeWhenDone', True)
    profile.set_preference('browser.download.dir', self.tempdir)
    profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                           "application/octet-stream;application/vnd.ms-excel")
    profile.set_preference("browser.helperApps.alwaysAsk.force", False)
    profile.set_preference("browser.download.manager.useWindow", False)

    self.browser = webdriver.Firefox(profile)
    self.browser.get('http://webbstat.av.se')
    # The button for expanded detailed options. This also happens to be
    # a good indicator as to whether all content is loaded.
    detailed_cls = "Document_TX_GOTOTAB_Avancerad"
    # Wait for a content element, and 3 extra seconds just in case
    WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
        .until(EC.presence_of_element_located((By.CLASS_NAME,
                                               detailed_cls)))
    self.browser.implicitly_wait(3)
    self.browser\
        .find_element_by_class_name(detailed_cls)\
        .find_element_by_tag_name("td")\
        .click()
    # Wait for a content element, and 3 extra seconds just in case
    WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
        .until(EC.presence_of_element_located((By.CLASS_NAME,
                                               detailed_cls)))
    self.browser.implicitly_wait(3)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_I_create_logrecords_with_table(context):
    """Create one or more log records described by a Gherkin table with
    the columns ``category``, ``level`` and ``message``.

    A category of ``__ROOT__`` maps to the root logger (category None).
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    for row in context.table.rows:
        category = row["category"]
        if category == "__ROOT__":
            category = None  # root logger
        level = LogLevel.parse_type(row["level"])
        make_log_record(category, level, row["message"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_I_create_logrecord_with_table(context):
    """Create exactly one log record from a one-row table.

    .. seealso:: :func:`step_I_create_logrecords_with_table()`
    """
    table = context.table
    assert table, "REQUIRE: context.table"
    assert len(table.rows) == 1, "REQUIRE: table.row.size == 1"
    # Delegate to the multi-record step once the single row is validated.
    step_I_create_logrecords_with_table(context)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_use_log_record_configuration(context):
    """Read log record configuration parameters from a table with the
    columns ``property`` (one of "format", "datefmt") and ``value``.

    Stores the values on ``context.log_record_format`` /
    ``context.log_record_datefmt``; unknown properties raise KeyError.
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["property", "value"])
    for row in context.table.rows:
        name, value = row["property"], row["value"]
        if name == "format":
            context.log_record_format = value
        elif name == "datefmt":
            context.log_record_datefmt = value
        else:
            raise KeyError("Unknown property=%s" % name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def smart_decode(binary, errors="strict"):
    """Automatically find the right codec to decode binary data to string.

    :param binary: binary data
    :param errors: one of 'strict', 'ignore' and 'replace'
    :returns: (text, encoding, confidence) tuple
    """
    detection = chardet.detect(binary)
    codec = detection["encoding"]
    score = detection["confidence"]
    return binary.decode(codec, errors=errors), codec, score
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(self, binary, url, encoding=None, errors="strict"):
    """Decode the binary body of an HTTP response into text.

    When *encoding* is not given, a per-domain cache of detected
    encodings is consulted; unseen domains fall back to chardet-based
    detection and the result is cached for next time.

    :param binary: binary content of a http request
    :param url: endpoint of the request
    :param encoding: manually specify the encoding
    :param errors: errors handle method
    :returns: str
    """
    if encoding is not None:
        # Caller pinned the encoding; no detection or caching needed.
        return binary.decode(encoding, errors=errors)
    domain = util.get_domain(url)
    if domain in self.domain_encoding_table:
        known = self.domain_encoding_table[domain]
        return binary.decode(known, errors=errors)
    html, detected, confidence = smart_decode(binary, errors=errors)
    # Remember the detected encoding for this domain.
    self.domain_encoding_table[domain] = detected
    return html
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify_number_pattern(number_pattern, **kwargs):
    """Return a copy of *number_pattern* with the given fields replaced;
    unspecified fields are carried over from the original pattern."""
    fields = ('pattern', 'prefix', 'suffix', 'grouping',
              'int_prec', 'frac_prec', 'exp_prec', 'exp_plus')
    for field in fields:
        # Keep caller-supplied values; fill the rest from the original.
        kwargs.setdefault(field, getattr(number_pattern, field))
    return NumberPattern(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_currency_field(__, prec, number, locale):
    """Format *number* as currency for the territory of *locale*."""
    locale = Locale.parse(locale)
    currency = get_territory_currencies(locale.territory)[0]
    if prec is None:
        # Use the locale's default currency precision and pattern.
        pattern = None
        currency_digits = True
    else:
        precision = int(prec)
        pattern = modify_number_pattern(locale.currency_formats['standard'],
                                        frac_prec=(precision, precision))
        currency_digits = False
    return format_currency(number, currency, pattern, locale=locale,
                           currency_digits=currency_digits)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_float_field(__, prec, number, locale):
    """Format *number* as a fixed-point value for *locale*."""
    if prec is None:
        # Optional fraction digits up to the default precision.
        digits = u'#' * NUMBER_DECIMAL_DIGITS
    else:
        # Exactly `prec` mandatory fraction digits.
        digits = u'0' * int(prec)
    pattern = parse_pattern(u'0.' + digits)
    return pattern.apply(number, locale)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_number_field(__, prec, number, locale):
    """Format *number* as a locale-aware decimal with *prec* fraction
    digits (default precision when *prec* is None)."""
    digits = NUMBER_DECIMAL_DIGITS if prec is None else int(prec)
    locale = Locale.parse(locale)
    pattern = locale.decimal_formats.get(None)
    return pattern.apply(number, locale, force_frac=(digits, digits))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_percent_field(__, prec, number, locale):
    """Format *number* as a locale-aware percentage with *prec* fraction
    digits (default precision when *prec* is None)."""
    digits = PERCENT_DECIMAL_DIGITS if prec is None else int(prec)
    locale = Locale.parse(locale)
    pattern = locale.percent_formats.get(None)
    return pattern.apply(number, locale, force_frac=(digits, digits))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_hexadecimal_field(spec, prec, number, locale):
    """Format *number* in hexadecimal; negative numbers are rendered as
    their two's complement at the smallest byte width that fits."""
    if number < 0:
        # Width in bytes needed to represent the magnitude, then mask to
        # obtain the two's complement representation.
        nbytes = int(math.log(-number, 1 << 8) + 1)
        number &= (1 << (8 * nbytes)) - 1
    return format(number, u'0%d%s' % (int(prec or 0), spec))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delegate(attribute_name, method_names):
    """Decorator factory to delegate methods to an attribute.

    Decorate a class to map every method in *method_names* to the
    attribute *attribute_name*.
    """
    forward = partial(_call_delegated_method, attribute_name)

    def decorate(class_):
        for name in method_names:
            setattr(class_, name, partialmethod(forward, name))
        return class_

    return decorate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_query(query):
    """Coerce *query* values in place into RAPI's wire representation.

    RAPI has curious coercion rules: ``None`` becomes an empty string
    and booleans become 0/1. Dict values are rejected with ValueError.
    Operates on the dict in place; no return value.

    :type query: dict
    :param query: query arguments
    """
    for key, value in query.items():
        if value is None:
            query[key] = ""
        elif isinstance(value, bool):
            # bool is checked before any generic handling: it is an int
            # subclass and must be sent as 0 or 1.
            query[key] = int(value)
        elif isinstance(value, dict):
            raise ValueError("Invalid query data type %r" %
                             type(value).__name__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def itemgetters(*args):
    """Return a function that maps an iterable to a list of item lookups,
    applying ``itemgetter(*args)`` to each element."""
    getter = itemgetter(*args)

    def inner(seq):
        return list(map(getter, seq))

    return inner
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stat_container(self, container):
    """Stat container metadata.

    :param container: container name (Container is equivalent to Bucket
        term in Amazon).
    :returns: the driver's container metadata response
    """
    LOG.debug('stat_container() with %s is success.', self.driver)
    return self.driver.stat_container(container)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_container(self, container, metadata, **kwargs):
    """Update container metadata.

    :param container: container name (Container is equivalent to Bucket
        term in Amazon).
    :param metadata: dict of additional metadata to include in the request.
    :param kwargs: dict of driver-specific extension arguments.
    :returns: the driver's response
    """
    # Fixed log message: it previously (incorrectly) said 'update_object()'.
    LOG.debug('update_container() with %s is success.', self.driver)
    return self.driver.update_container(container, metadata, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stat_object(self, container, obj):
    """Stat object metadata.

    :param container: container name (Container is equivalent to Bucket
        term in Amazon).
    :param obj: object name (Object is equivalent to Key term in Amazon).
    :returns: the driver's object metadata response
    """
    LOG.debug('stat_object() with %s is success.', self.driver)
    return self.driver.stat_object(container, obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_object(self, container, obj, **kwargs):
    """Delete an object in a container.

    :param container: container name (Container is equivalent to Bucket
        term in Amazon).
    :param obj: object name (Object is equivalent to Key term in Amazon).
    :returns: the driver's response, or None when the driver raises
        DriverException (best-effort delete: the error is only logged).
    """
    try:
        # NOTE(review): this debug line fires before the call completes,
        # so "is success" is logged even when the delete fails below.
        LOG.debug('delete_object() with %s is success.', self.driver)
        return self.driver.delete_object(container, obj, **kwargs)
    except DriverException as e:
        # Fixed log message: it previously said 'download_object()'.
        LOG.exception('delete_object() with %s raised an exception %s.',
                      self.driver, e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_container_objects(self, container, prefix=None, delimiter=None):
    """List container objects.

    :param container: container name (Container is equivalent to Bucket
        term in Amazon).
    :param prefix: prefix query
    :param delimiter: string to delimit the queries on
    :returns: the driver's listing response
    """
    LOG.debug('list_container_objects() with %s is success.', self.driver)
    return self.driver.list_container_objects(container, prefix, delimiter)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_object(self, container, obj, metadata, **kwargs):
    """Update object metadata.

    :param container: container name (Container is equivalent to Bucket
        term in Amazon).
    :param obj: object name (Object is equivalent to Key term in Amazon).
    :param metadata: dict of additional metadata to include in the request.
    :returns: the driver's response, or None when the driver raises
        DriverException (the error is only logged).
    """
    try:
        LOG.debug('update_object() with %s is success.', self.driver)
        return self.driver.update_object(container, obj,
                                         metadata, **kwargs)
    except DriverException as e:
        # Fixed log message: it previously said 'copy_object()'.
        LOG.exception('update_object() with %s raised an exception %s.',
                      self.driver, e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_path_fields(cls, base=None):
    """Collect the model fields used to build django-tutelary object paths.

    Walks ``cls.TutelaryMeta.path_fields``; foreign-key fields are
    expanded recursively into the related model's path fields.

    :param cls: model class carrying a ``TutelaryMeta`` attribute
    :param base: accumulated field-path prefix (used during recursion)
    :returns: list of field-name paths, each a list of strings
    """
    # `base=None` replaces the original mutable default argument `base=[]`;
    # mutable defaults are shared across calls and are an accident magnet.
    base = [] if base is None else base
    pfs = []
    for pf in cls.TutelaryMeta.path_fields:
        if pf == 'pk':
            pfs.append(base + ['pk'])
        else:
            f = cls._meta.get_field(pf)
            if isinstance(f, models.ForeignKey):
                # Recurse into the related model, extending the prefix.
                pfs += get_path_fields(f.target_field.model, base=base + [pf])
            else:
                pfs.append(base + [f.name])
    return pfs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_perms_object(obj, action):
    """Build the django-tutelary path for *obj* from the field specs
    listed in ``obj.__class__.TutelaryMeta.pfs``."""
    def resolve(spec):
        # A bare string is a literal path segment; a list of field names
        # is followed attribute-by-attribute starting from obj.
        if isinstance(spec, str):
            return spec
        target = obj
        for fieldname in spec:
            target = getattr(target, fieldname)
        return str(target)
    return Object([resolve(spec) for spec in obj.__class__.TutelaryMeta.pfs])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def permissioned_model(cls, perm_type=None, path_fields=None, actions=None):
    """Set up a Django model class for django-tutelary permissioning.

    Can either be called directly, passing a class and suitable values
    for ``perm_type``, ``path_fields`` and ``actions``, or used as a
    class decorator, taking those values from the ``TutelaryMeta``
    subclass of the decorated class.

    :raises DecoratorException: if ``cls`` is not a Django model, or
        required arguments are missing when ``TutelaryMeta`` is absent
    :raises PermissionObjectException: if an action's
        ``permissions_object`` does not name a FK/O2O field
    """
    if not issubclass(cls, models.Model):
        raise DecoratorException(
            'permissioned_model',
            "class '" + cls.__name__ + "' is not a Django model"
        )
    # Track whether we attached TutelaryMeta here so we can undo on failure.
    added = False
    try:
        if not hasattr(cls, 'TutelaryMeta'):
            if perm_type is None or path_fields is None or actions is None:
                raise DecoratorException(
                    'permissioned_model',
                    ("missing argument: all of perm_type, path_fields and " +
                     "actions must be supplied")
                )
            added = True
            # Synthesize the TutelaryMeta holder class from the arguments.
            cls.TutelaryMeta = type('TutelaryMeta', (object,),
                                    dict(perm_type=perm_type,
                                         path_fields=path_fields,
                                         actions=actions))
        # Full path-field spec: permission type followed by model fields.
        cls.TutelaryMeta.pfs = ([cls.TutelaryMeta.perm_type] +
                                get_path_fields(cls))
        perms_objs = {}
        for a in cls.TutelaryMeta.actions:
            an = a
            ap = {}
            # Actions may be plain names or (name, options-dict) tuples.
            if isinstance(a, tuple):
                an = a[0]
                ap = a[1]
            Action.register(an)
            if isinstance(ap, dict) and 'permissions_object' in ap:
                po = ap['permissions_object']
                if po is not None:
                    try:
                        # permissions_object must name a FK or O2O field.
                        t = cls._meta.get_field(po).__class__
                        if t not in [models.ForeignKey,
                                     models.OneToOneField]:
                            raise PermissionObjectException(po)
                    except:
                        raise PermissionObjectException(po)
                perms_objs[an] = po
        if len(perms_objs) == 0:
            cls.get_permissions_object = get_perms_object
        else:
            cls.get_permissions_object = make_get_perms_object(perms_objs)
        return cls
    except:
        # Roll back the TutelaryMeta we attached before re-raising.
        if added:
            del cls.TutelaryMeta
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getArrays(items, attr, defaultValue):
"""Return arrays with equal size of item attributes from a list of sorted "items" for fast and convenient data processing. :param attr: list of item attributes that should be added to the returned array. :param defaultValue: if an item is missing an attribute, the "defaultValue" is added to the array instead. """ |
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addMsrunContainers(mainContainer, subContainer):
    """Add the complete content of all specfile entries from the
    *subContainer* to the *mainContainer*.

    If a specfile of ``subContainer.info`` is already present in
    ``mainContainer.info`` its contents are not added.

    :param mainContainer: :class:`MsrunContainer`
    :param subContainer: :class:`MsrunContainer`

    .. warning::
        Does not generate new items: all items added to the
        ``mainContainer`` are still present in the ``subContainer`` and
        share the same memory location, so changes made to elements of
        one container also affect the other.
    """
    # Maps each datatype flag to the attribute name of its container.
    typeToContainer = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic',
                       'sai': 'saic', 'si': 'sic'
                       }
    for specfile in subContainer.info:
        if specfile in mainContainer.info:
            # Already present; leave the existing entry untouched.
            continue
        mainContainer.addSpecfile(specfile, subContainer.info[specfile]['path'])
        for datatype, status in listitems(subContainer.info[specfile]['status']):
            if not status:
                # Datatype was never loaded for this specfile; skip it.
                continue
            datatypeContainer = typeToContainer[datatype]
            dataTypeContainer = getattr(mainContainer, datatypeContainer)
            subContainerData = getattr(subContainer,
                                       datatypeContainer
                                       )[specfile]
            # Shared reference, not a copy (see warning above).
            dataTypeContainer[specfile] = subContainerData
            mainContainer.info[specfile]['status'][datatype] = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setPath(self, folderpath, specfiles=None):
    """Change the folder path recorded for the specified specfiles; the
    path is used for saving and loading of ``mrc`` files.

    :param specfiles: an ms-run name, a list of names, or None for all
    :type specfiles: None, str, [str, str]
    :param folderpath: a filedirectory
    """
    if specfiles is None:
        specfiles = list(viewkeys(self.info))
    else:
        specfiles = aux.toList(specfiles)
    _containerSetPath(self, folderpath, specfiles)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def removeSpecfile(self, specfiles):
    """Completely remove the specified specfiles from the
    ``msrunContainer``.

    :param specfiles: the name of an ms-run file or a list of names
    :type specfiles: str, [str, str]
    """
    for specfile in aux.toList(specfiles):
        for containerName in ('rmc', 'cic', 'smic', 'saic', 'sic'):
            # pop with default silently skips datatypes never loaded
            # for this specfile (same effect as the old try/del).
            getattr(self, containerName).pop(specfile, None)
        del self.info[specfile]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _processDatatypes(self, rm, ci, smi, sai, si):
"""Helper function that returns a list of datatype strings, depending on the parameters boolean value. :param rm: bool, True to add ``rm`` :param ci: bool, True to add ``ci`` :param smi: bool, True to add ``smi`` :param sai: bool, True to add ``sai`` :param si: bool, True to add ``si`` """ |
datatypes = list()
for datatype, value in [('rm', rm), ('ci', ci), ('smi', smi),
('sai', sai), ('si', si)]:
if value:
datatypes.append(datatype)
return datatypes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, specfiles=None, rm=False, ci=False, smi=False, sai=False, si=False, compress=True, path=None):
    """Write the specified datatypes to ``mrc`` files on the hard disk.

    .. note::
        If ``.save()`` is called and no ``mrc`` files are present in the
        specified path new files are generated, otherwise old files are
        replaced.

    :param specfiles: the name of an ms-run file or a list of names. If
        None all specfiles are selected.
    :type specfiles: None, str, [str, str]
    :param rm: bool, True to select ``self.rmc`` (run metadata)
    :param ci: bool, True to select ``self.cic`` (chromatogram items)
    :param smi: bool, True to select ``self.smic`` (spectrum metadata items)
    :param sai: bool, True to select ``self.saic`` (spectrum array items)
    :param si: bool, True to select ``self.sic`` (spectrum items)
    :param compress: bool, True to use zip file compression
    :param path: filedirectory to which the ``mrc`` files are written. By
        default the parameter is set to ``None`` and the filedirectory is
        read from ``self.info[specfile]['path']``
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    datatypes = self._processDatatypes(rm, ci, smi, sai, si)
    if len(datatypes) == 0:
        # No explicit selection means: save everything.
        datatypes = ['rm', 'ci', 'smi', 'sai', 'si']
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "MsrunContainer.save()": "%s" '\
                       'is not present in "MsrunContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
            continue
        else:
            msrunInfo = self.info[specfile]
            # Local variable so the `path` parameter stays untouched.
            specfilePath = msrunInfo['path'] if path is None else path
        # PartiallySafeReplace defers file replacement until all writes
        # in this block succeed.
        with aux.PartiallySafeReplace() as msr:
            for datatype in datatypes:
                filename = specfile + '.mrc_' + datatype
                filepath = aux.joinpath(specfilePath, filename)
                with msr.open(filepath, 'w+b') as openfile:
                    # Dispatch to the datatype-specific writer.
                    if datatype == 'rm':
                        self._writeRmc(openfile, specfile)
                    elif datatype == 'ci':
                        self._writeCic(openfile, specfile, compress)
                    elif datatype == 'si':
                        self._writeSic(openfile, specfile, compress)
                    elif datatype == 'smi':
                        self._writeSmic(openfile, specfile, compress)
                    elif datatype == 'sai':
                        self._writeSaic(openfile, specfile, compress)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _writeRmc(self, filelike, specfile):
    """Serialize the ``rmc`` container entry of *specfile* as a
    human-readable, pretty-printed XML string and write it out.

    :param filelike: path to a file (str) or a file-like object
    :param specfile: name of an ms-run file present in ``self.info``
    """
    filelike.write(ETREE.tostring(self.rmc[specfile], pretty_print=True))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, specfiles=None, rm=False, ci=False, smi=False, sai=False, si=False):
    """Import the specified datatypes from ``mrc`` files on the hard disk.

    :param specfiles: the name of an ms-run file or a list of names. If
        None all specfiles are selected.
    :type specfiles: None, str, [str, str]
    :param rm: bool, True to import ``mrc_rm`` (run metadata)
    :param ci: bool, True to import ``mrc_ci`` (chromatogram items)
    :param smi: bool, True to import ``mrc_smi`` (spectrum metadata items)
    :param sai: bool, True to import ``mrc_sai`` (spectrum array items)
    :param si: bool, True to import ``mrc_si`` (spectrum items)
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    #Select only specfiles which are present in the ``self.info``.
    selectedSpecfiles = list()
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "MsrunContainer.load()": "%s" '\
                       'not present in MsrunContainer.info' % specfile
            warnings.warn(warntext)
        else:
            selectedSpecfiles.append(specfile)
    datatypes = self._processDatatypes(rm, ci, smi, sai, si)
    if len(datatypes) == 0:
        # No explicit selection means: load everything.
        datatypes = ['rm', 'ci', 'smi', 'sai', 'si']
    for specfile in selectedSpecfiles:
        msrunInfo = self.info[specfile]
        specfilePath = msrunInfo['path']
        if 'rm' in datatypes:
            # Run metadata is stored as plain XML.
            rmPath = aux.joinpath(specfilePath, specfile+'.mrc_rm')
            with open(rmPath, 'rb') as openfile:
                xmlString = openfile.read()
            self.rmc[specfile] = ETREE.fromstring(xmlString)
            msrunInfo['status']['rm'] = True
        if 'ci' in datatypes:
            ciPath = aux.joinpath(specfilePath, specfile+'.mrc_ci')
            self.cic[specfile] = aux.loadBinaryItemContainer(ciPath,
                                                             Ci.jsonHook)
            msrunInfo['status']['ci'] = True
        if 'smi' in datatypes:
            smiPath = aux.joinpath(specfilePath, specfile+'.mrc_smi')
            with zipfile.ZipFile(smiPath, 'r') as containerZip:
                #Convert the zipfile data into a str object,necessary since
                #containerZip.read() returns a bytes object.
                jsonString = io.TextIOWrapper(containerZip.open('data'),
                                              encoding='utf-8'
                                              ).read()
            self.smic[specfile] = json.loads(jsonString,
                                             object_hook=Smi.jsonHook
                                             )
            msrunInfo['status']['smi'] = True
        if 'sai' in datatypes:
            saiPath = aux.joinpath(specfilePath, specfile+'.mrc_sai')
            self.saic[specfile] = aux.loadBinaryItemContainer(saiPath,
                                                              Sai.jsonHook
                                                              )
            msrunInfo['status']['sai'] = True
        if 'si' in datatypes:
            siPath = aux.joinpath(specfilePath, specfile+'.mrc_si')
            with zipfile.ZipFile(siPath, 'r') as containerZip:
                #Convert the zipfile data into a str object, necessary since
                #containerZip.read() returns a bytes object.
                jsonString = io.TextIOWrapper(containerZip.open('data'),
                                              encoding='utf-8'
                                              ).read()
            self.sic[specfile] = json.loads(jsonString,
                                            object_hook=Si.jsonHook
                                            )
            msrunInfo['status']['si'] = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jsonHook(encoded):
    """Custom JSON ``object_hook`` that reconstructs a ``Ci``,
    ``MzmlProduct`` or ``MzmlPrecursor`` instance from its marker key;
    any other decoded dict is passed through unchanged.

    :param encoded: a JSON decoded object literal (a dict)
    """
    if '__Ci__' in encoded:
        return Ci._fromJSON(encoded['__Ci__'])
    if '__MzmlProduct__' in encoded:
        return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
    if '__MzmlPrecursor__' in encoded:
        return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
    return encoded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jsonHook(encoded):
    """Custom JSON ``object_hook`` that reconstructs a ``Smi``,
    ``MzmlScan``, ``MzmlProduct`` or ``MzmlPrecursor`` instance from its
    marker key; any other decoded dict is passed through unchanged.

    :param encoded: a JSON decoded object literal (a dict)
    """
    if '__Smi__' in encoded:
        return Smi._fromJSON(encoded['__Smi__'])
    if '__MzmlScan__' in encoded:
        return MzmlScan._fromJSON(encoded['__MzmlScan__'])
    if '__MzmlProduct__' in encoded:
        return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
    if '__MzmlPrecursor__' in encoded:
        return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
    return encoded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def removeSpecfile(self, specfiles):
    """Completely removes the specified specfiles from the ``SiiContainer``.

    Deletes both the ``container`` and the ``info`` entry of every listed
    specfile.

    :param specfiles: the name of an ms-run file or a list of names.
    """
    for specfileName in aux.toList(specfiles):
        del self.container[specfileName]
        del self.info[specfileName]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, specfiles=None, compress=True, path=None):
    """Writes the specified specfiles to ``siic`` files on the hard disk.

    .. note::
        If ``.save()`` is called and no ``siic`` files are present in the
        specified path new files are generated, otherwise old files are
        replaced.

    :param specfiles: the name of an ms-run file or a list of names. If
        None all specfiles are selected.
    :param compress: bool, True to use zip file compression
    :param path: filedirectory to which the ``siic`` files are written. By
        default the parameter is set to ``None`` and the filedirectory is
        read from ``self.info[specfile]['path']``
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "SiiContainer.save()": "%s" is'\
                       ' not present in "SiiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
            continue
        #Bugfix: use a per-iteration local instead of rebinding "path".
        #The previous code set "path" inside the loop, so with path=None
        #every specfile after the first was written to the FIRST
        #specfile's directory instead of its own.
        outputpath = self.info[specfile]['path'] if path is None else path
        with aux.PartiallySafeReplace() as msr:
            filename = specfile + '.siic'
            filepath = aux.joinpath(outputpath, filename)
            with msr.open(filepath, mode='w+b') as openfile:
                self._writeContainer(openfile, specfile, compress)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calcMz(self, specfiles=None, guessCharge=True, obsMzKey='obsMz'):
    """Calculate the exact mass for ``Sii`` elements from the
    ``Sii.peptide`` sequence.

    Sets ``Sii.excMz`` on every processed item; when the charge has to be
    guessed, ``Sii.charge`` is updated as well.

    :param specfiles: the name of an ms-run file or a list of names. If
        None all specfiles are selected.
    :param guessCharge: bool, True if the charge should be guessed if the
        attribute ``charge`` is missing from ``Sii``. Uses the calculated
        peptide mass and the observed m/z value to calculate the charge.
    :param obsMzKey: attribute name of the observed m/z value in ``Sii``.
    """
    #TODO: important to test function, since changes were made
    #Bind helpers to local names; these are used repeatedly in the loop.
    _calcMass = maspy.peptidemethods.calcPeptideMass
    _calcMzFromMass = maspy.peptidemethods.calcMzFromMass
    _massProton = maspy.constants.atomicMassProton
    #Estimate the charge state from the neutral mass and the observed m/z.
    _guessCharge = lambda mass, mz: round(mass / (mz - _massProton), 0)
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    #Cache calculated peptide masses so every unique sequence is only
    #computed once across all processed Sii items.
    tempMasses = dict()
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "SiiContainer.calcMz()": '\
                       '"%s" is not present in "SiiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
        else:
            for sii in self.getItems(specfiles=specfile):
                peptide = sii.peptide
                if peptide not in tempMasses:
                    #Cross-linked items carry two sequences; sum both
                    #masses. Otherwise calculate the single peptide mass.
                    if hasattr(sii, 'diPeptide'):
                        tempMasses[peptide] = (_calcMass(sii.peptide1) +
                                               _calcMass(sii.peptide2)
                                               )
                    else:
                        tempMasses[peptide] = _calcMass(peptide)
                peptideMass = tempMasses[peptide]
                if sii.charge is not None:
                    sii.excMz = _calcMzFromMass(peptideMass, sii.charge)
                elif guessCharge:
                    guessedCharge = _guessCharge(peptideMass,
                                                 getattr(sii, obsMzKey)
                                                 )
                    sii.excMz = _calcMzFromMass(peptideMass, guessedCharge)
                    sii.charge = guessedCharge
                else:
                    #No charge available and guessing disabled.
                    sii.excMz = None
    del(tempMasses)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _writeContainer(self, filelike, specfile, compress):
    """Writes the ``self.container`` entry of the specified specfile to the
    ``fic`` format.

    :param filelike: path to a file (str) or a file-like object
    :param specfile: name of an ms-run file present in ``self.info``
    :param compress: bool, True to use zip file compression

    .. note::
        In addition it could also dump the ``self.info`` entry to the
        zipfile with the filename ``info``, but this is not used at the
        moment.

    For details see :func:`maspy.auxiliary.writeJsonZipfile()`
    """
    specfileData = self.container[specfile]
    aux.writeJsonZipfile(filelike, specfileData, compress=compress)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, specfiles=None):
    """Imports the specified ``fic`` files from the hard disk.

    :param specfiles: the name of an ms-run file or a list of names. If
        None all specfiles are selected.
    :type specfiles: None, str, [str, str]
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "FiContainer.load()": "%s" is'\
                       ' not present in "FiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
            continue
        fiPath = aux.joinpath(self.info[specfile]['path'],
                              specfile + '.fic'
                              )
        with zipfile.ZipFile(fiPath, 'r') as containerZip:
            #containerZip.read() would return bytes; wrap the zip entry in
            #a TextIOWrapper to decode it into a str for json.loads().
            dataEntry = containerZip.open('data')
            jsonString = io.TextIOWrapper(dataEntry,
                                          encoding='utf-8'
                                          ).read()
        self.container[specfile] = json.loads(jsonString,
                                              object_hook=Fi.jsonHook
                                              )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.