text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_parsed_args(command_name, doc, argv):
    # type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any]
    """Parse the docstring with docopt.

    Args:
        command_name: The name of the subcommand to parse.
        doc: A docopt-parseable string.
        argv: The list of arguments to pass to docopt during parsing.

    Returns:
        The docopt results dictionary. If the subcommand has the same
        name as the primary command, the subcommand value will be added
        to the dictionary.
    """
    _LOGGER.debug('Parsing docstring: """%s""" with arguments %s.', doc, argv)
    parsed = docopt(doc, argv=argv)
    # When the subcommand shadows the primary command's name, record it
    # explicitly so callers can tell the subcommand was invoked.
    if settings.command == command_name:
        parsed[command_name] = True
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trace(msg):
    """Print a trace message to stderr if environment variable is set."""
    # Guard clause: tracing is opt-in via JARN_TRACE=1.
    if os.environ.get('JARN_TRACE') != '1':
        return
    print('TRACE:', msg, file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Efn(Ms, eccs):
    """works for -2pi < Ms < 2pi, e <= 0.97"""
    two_pi = 2 * np.pi
    Ms = np.atleast_1d(Ms)
    eccs = np.atleast_1d(eccs)
    # EFN only handles mean anomalies reduced to [0, 2pi); count whole
    # revolutions, solve on the reduced anomaly, then restore the offset.
    revolutions = np.floor(Ms / two_pi)
    return EFN(Ms % two_pi, eccs) + revolutions * two_pi
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enable_modules_from_last_session(seashcommanddict):
    """ Enable every module that isn't marked as disabled in the modules folder. This function is meant to be called when seash is initializing and nowhere else. A module is marked as disabled when there is a modulename.disabled file. """
    # NOTE: Python 2 code (print statements, `except X, e` syntax).
    successfully_enabled_modules = []
    modules_to_enable = get_enabled_modules()
    for modulename in modules_to_enable:
        # There are no bad side effects to seash's state when we do this
        # The only thing that should happen is that the modulename.disabled file
        # gets created (temporarily)
        disable(seashcommanddict, modulename)
        try:
            enable(seashcommanddict, modulename)
            successfully_enabled_modules.append(modulename)
        except seash_exceptions.ModuleConflictError, e:
            print "Failed to enable the '"+modulename+"' module due to the following conflicting command:"
            print str(e)
            # We mark this module as disabled by adding a modulename.disabled file.
            # The file is opened for write and immediately dropped; only its
            # existence matters.
            open(MODULES_FOLDER_PATH + os.sep + modulename + ".disabled", 'w')
        except seash_exceptions.InitializeError, e:
            print "Failed to enable the '"+modulename+"' module."
            # Roll back: re-create the .disabled marker via disable().
            disable(seashcommanddict, modulename)
    # Report the modules that came up, in sorted order.
    successfully_enabled_modules.sort()
    print 'Enabled modules:', ', '.join(successfully_enabled_modules), '\n'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ensure_module_folder_exists():
    """ Checks to see if the module folder exists. If it does not, create it. If there is an existing file with the same name, we raise a RuntimeError. """
    # NOTE: Python 2 `except X, e` syntax.
    if not os.path.isdir(MODULES_FOLDER_PATH):
        try:
            os.mkdir(MODULES_FOLDER_PATH)
        except OSError, e:
            # os.mkdir raises OSError for several failure modes; only the
            # name-collision case is translated into a RuntimeError. Other
            # OSErrors are silently swallowed here — presumably intentional,
            # but worth confirming.
            if "file already exists" in str(e):
                raise RuntimeError("Could not create modules folder: file exists with the same name")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_configuration_from_settings(self, setting_info):
    """Return a dictionary with configuration names as keys and setting
    values extracted from this configurator's settings as values.

    :param setting_info: Sequence of 2-tuples containing the configuration
        name as the first and the setting name as the second element.
    :returns: dict mapping configuration name to setting value; pairs whose
        setting is missing or ``None`` are omitted, as before.
    """
    settings = self.get_settings()
    # Dict comprehension replaces dict([...]) and `not x is None` is
    # rewritten to the idiomatic `is not None`; each key is looked up
    # only once on the success path.
    return {name: settings[key]
            for (name, key) in setting_info
            if settings.get(key) is not None}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_repository(self, name, repository_type, repository_class, aggregate_class, make_default, configuration):
    """ Generic method for adding a repository. """
    # Resolve the repository manager registered with this configurator.
    repo_mgr = self.get_registered_utility(IRepositoryManager)
    if name is None:
        # If no name was given, this is assumed to be the ROOT repository
        # for the given repository type.
        name = REPOSITORY_DOMAINS.ROOT
    repo = repo_mgr.new(repository_type, name=name,
                        make_default=make_default,
                        repository_class=repository_class,
                        aggregate_class=aggregate_class,
                        configuration=configuration)
    # Register the freshly created repository with the manager.
    repo_mgr.set(repo)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encrypt_email(email):
    """Encrypt an email address for storage in the database.

    Uses AES with the encryption key (``AES_KEY``) defined in the current
    Flask application's configuration.

    :param email: The email address.
    """
    key = flask.current_app.config["AES_KEY"]
    return SimpleAES(key).encrypt(email)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shortlink_scanned(self, data):
    """Called when a shortlink_scanned event is received """
    # Inform log that we received an event
    self.logger.info("Received shortlink_scanned event")
    # The event payload arrives as a JSON string; the scanning customer's
    # token is under object.id.
    data = json.loads(data)
    customer_token = str(data['object']['id'])
    # Create an authorization-only payment request for the customer.
    # NOTE(review): amount/currency/text look like demo values for a
    # hot-chocolate point of sale — confirm before reuse.
    response = self.mapiclient.create_payment_request(
        customer=customer_token,
        currency="NOK",
        amount="20.00",
        allow_credit=True,
        pos_id=self._pos_id,
        pos_tid=str(uuid.uuid4()),  # unique per-transaction id
        action='auth',
        expires_in=90,
        callback_uri="pusher:m-winterwarming-pos_callback_chan",
        text='Have some hot chocolate!')
    # Remember the payment request id for later status/capture handling.
    self._tid = response['id']
    print(str(self._tid))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pusher_connected(self, data):
    """Called when the pusherclient is connected """
    # Inform user that pusher is done connecting
    self.logger.info("Pusherclient connected")
    # Bind the events we want to listen to
    self.callback_client.bind("payment_authorized",
                              self.payment_authorized)
    self.callback_client.bind("shortlink_scanned",
                              self.shortlink_scanned)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, *args, **kwargs):
    """Check the cache for a cached entry for this pk; if absent, fetch
    using super (which stores the result in cache via ``self.iterator``).

    Most of the logic here was gathered from a careful reading of
    ``django.db.models.sql.query.add_filter``.
    """
    if self.query.where:
        # If there is any other ``where`` filter on this QuerySet just call
        # super. There will be a where clause if this QuerySet has already
        # been filtered/cloned.
        return super(CachingQuerySet, self).get(*args, **kwargs)
    # Punt on anything more complicated than get by pk/id only...
    if len(kwargs) == 1:
        # BUG FIX: kwargs.keys()[0] / kwargs.values()[0] fail on Python 3,
        # where dict views are not indexable; next(iter(...)) and a keyed
        # lookup work on both Python 2 and 3.
        k = next(iter(kwargs))
        if k in ('pk', 'pk__exact', '%s' % self.model._meta.pk.attname,
                 '%s__exact' % self.model._meta.pk.attname):
            obj = cache.get(self.model._cache_key(pk=kwargs[k]))
            if obj is not None:
                obj.from_cache = True
                return obj
    # Calls self.iterator to fetch objects, storing object in cache.
    return super(CachingQuerySet, self).get(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_path(self, name):
    """Read and return the UTF-8 decoded contents of the file resolved by
    ``lookup_path`` for *name*. No caching will be done.
    """
    path = self.lookup_path(name)
    with codecs.open(path, encoding='utf-8') as stream:
        return stream.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self, mold_id, data, wrapper_tag='div'):
    """Execute a mold `mold_id` by rendering through ``env``.

    The mold's default template is rendered with *data* (a dict) and the
    result is wrapped so the client-side on-load trigger will run the
    mold's index.js; call ``render`` instead if wrapping is not desired.
    """
    render_args = dict(data)
    render_args['_nunja_data_'] = 'data-nunja="%s"' % mold_id
    render_args['_template_'] = self.load_mold(mold_id)
    render_args['_wrapper_tag_'] = wrapper_tag
    return self._core_template_.render(**render_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(self, mold_id, data):
    """Render a mold `mold_id` using only its default template.

    No wrapper markup is applied around the rendered output.
    """
    return self.load_mold(mold_id).render(**data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_model_table(self, part):
    """ Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) """
    rows = self.parser.find(part).find_children('tr').list_results()
    table = []
    for row in rows:
        # Expand each row's td/th cells, duplicating colspan cells.
        table.append(self._get_model_row(self.parser.find(
            row
        ).find_children('td,th').list_results()))
    # Resolve rowspans across the collected rows.
    return self._get_valid_model_table(table)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_valid_model_table(self, ros):
    """ Returns a list that represents the table with the rowspans. :param ros: The list that represents the table without the rowspans. :type ros: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) :return The list that represents the table with the rowspans. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) """
    # pylint: disable=no-self-use
    new_table = []
    if bool(ros):
        length_table = len(ros)
        for row_index in range(0, length_table):
            # Count of columns this row has been shifted right by cells
            # spilled over from rowspans in earlier rows.
            cells_added = 0
            original_row = [] + ros[row_index]
            if len(new_table) <= row_index:
                new_table.append([])
            length_row = len(original_row)
            for cell_index in range(0, length_row):
                cell = original_row[cell_index]
                new_cell_index = cell_index + cells_added
                new_row = new_table[row_index]
                # Slide right past slots already occupied by rowspan
                # spillover (non-None), padding with None as needed.
                while True:
                    if len(new_row) <= new_cell_index:
                        new_row.append(None)
                        break
                    elif new_row[new_cell_index] is None:
                        break
                    else:
                        cells_added += 1
                        new_cell_index = cell_index + cells_added
                new_row[new_cell_index] = cell
                if cell.has_attribute('rowspan'):
                    rowspan = int(cell.get_attribute('rowspan'))
                    if rowspan > 1:
                        # Replicate the cell into the same column of each
                        # subsequent row it spans, creating/padding those
                        # rows as necessary.
                        for rowspan_index in range(1, rowspan):
                            new_row_index = row_index + rowspan_index
                            if len(new_table) <= new_row_index:
                                new_table.append([])
                            while (
                                len(new_table[new_row_index])
                                < new_cell_index
                            ):
                                new_table[new_row_index].append(None)
                            new_table[new_row_index].append(cell)
    return new_table
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_model_row(self, row):
""" Returns a list that represents the line of table with the colspans. :param row: The list that represents the line of table without the colspans. :type row: list(hatemile.util.html.htmldomelement.HTMLDOMElement) :return: The list that represents the line of table with the colspans. :rtype: list(hatemile.util.html.htmldomelement.HTMLDOMElement) """ |
# pylint: disable=no-self-use
new_row = [] + row
size = len(row)
for i in range(0, size):
cell = row[i]
if cell.has_attribute('colspan'):
colspan = int(cell.get_attribute('colspan'))
if colspan > 1:
for j in range(1, colspan):
new_row.insert(i + j, cell)
return new_row |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_header(self, hed):
""" Validate the list that represents the table header. :param hed: The list that represents the table header. :type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) :return: True if the table header is valid or False if the table header is not valid. :rtype: bool """ |
# pylint: disable=no-self-use
if not bool(hed):
return False
length = -1
for row in hed:
if not bool(row):
return False
elif length == -1:
length = len(row)
elif len(row) != length:
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_cells_headers_ids(self, hed, index):
""" Returns a list with ids of rows of same column. :param hed: The list that represents the table header. :type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) :param index: The index of columns. :type index: int :return: The list with ids of rows of same column. :rtype: list(str) """ |
# pylint: disable=no-self-use
ids = []
for row in hed:
if row[index].get_tag_name() == 'TH':
ids.append(row[index].get_attribute('id'))
return ids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _associate_data_cells_with_header_cells_of_row(self, element):
    """ Associate the data cell with header cell of row. :param element: The table body or table footer. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement """
    table = self._get_model_table(element)
    for row in table:
        headers_ids = []
        # First pass: give every TH an id, collect those ids, and mark
        # the TH as a row-scoped header.
        for cell in row:
            if cell.get_tag_name() == 'TH':
                self.id_generator.generate_id(cell)
                headers_ids.append(cell.get_attribute('id'))
                cell.set_attribute('scope', 'row')
        if bool(headers_ids):
            # Second pass: append each TH id to every TD's headers list.
            for cell in row:
                if cell.get_tag_name() == 'TD':
                    headers = cell.get_attribute('headers')
                    for header_id in headers_ids:
                        headers = CommonFunctions.increase_in_list(
                            headers,
                            header_id
                        )
                    cell.set_attribute('headers', headers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prepare_header_cells(self, table_header):
    """ Set the scope of header cells of table header. :param table_header: The table header. :type table_header: hatemile.util.html.htmldomelement.HTMLDOMElement """
    cells = self.parser.find(table_header).find_children(
        'tr'
    ).find_children('th').list_results()
    for cell in cells:
        # Each TH needs an id (for headers associations) and column scope.
        self.id_generator.generate_id(cell)
        cell.set_attribute('scope', 'col')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encrypt(clear_text) -> str:
    """ Use config.json key to encrypt """
    # Fernet requires bytes; encode text input first.
    payload = clear_text if isinstance(clear_text, bytes) else str.encode(clear_text)
    fernet = Fernet(current_app.config['KEY'])
    return fernet.encrypt(payload).decode("utf-8")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decrypt(crypt_text) -> str:
    """ Use config.json key to decrypt """
    # Fernet tokens are bytes; encode text input first.
    token = crypt_text if isinstance(crypt_text, bytes) else str.encode(crypt_text)
    fernet = Fernet(current_app.config['KEY'])
    return fernet.decrypt(token).decode("utf-8")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_volume(self, id):
    """Return volume information; *id* may be a volume id or a path.

    When *id* names an existing file, report its path and size (measured
    by seeking to the end of the file descriptor); otherwise fall back to
    the volume registry lookup.
    """
    if exists(id):
        # The argument is actually a filesystem path.
        with open(id) as handle:
            size = os.lseek(handle.fileno(), 0, os.SEEK_END)
        return {'path': id, 'size': size}
    return self.volume.get(id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randomize(self, device=None, percent=100, silent=False):
    """ Writes random data to the beginning of each 4MB block on a block device this is useful when performance testing the backup process (Without any optional arguments will randomize the first 32k of each 4MB block on 100 percent of the device) """
    volume = self.get_volume(device)
    # The number of blocks in the volume
    blocks = int(volume['size'] / BLOCK_SIZE)
    # How many writes should be to the device
    # (based on the percentage requested)
    num_writes = int(blocks * percent * 0.01)
    # Build a sorted list of block offsets to write so the device is
    # touched in ascending order.
    offsets = sorted(random.sample(range(blocks), num_writes))
    total = 0
    if not silent:
        print('Writing urandom to %s bytes in %s' % (volume['size'],
                                                     volume['path']))
    # BUG FIX: open in binary mode ('wb') — os.urandom returns bytes and
    # writing bytes to a text-mode handle raises TypeError on Python 3
    # (binary mode is also correct on Python 2). Local renamed from
    # `file`, which shadowed the builtin.
    with open(volume['path'], 'wb') as output:
        for offset in offsets:
            if not silent:
                self.dot()
            output.seek(offset * BLOCK_SIZE)
            # Create a random string 32k long then duplicate
            # the randomized string 128 times (32768 * 128 = 4MB)
            data = os.urandom(32768) * 128
            total += len(data)
            # write out the 4MB block of randomized data
            output.write(data)
    print("\nWrote: %s" % total)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def backup(self, id=None, src=None, timestamp=None):
    """ This runs a backup job outside of the storage api, which is useful for performance testing backups """
    # Set basic Logging
    logging.basicConfig()
    # Get the lunr logger
    log = logger.get_logger()
    # Output Debug level info
    log.logger.setLevel(logging.DEBUG)
    # Load the local storage configuration
    conf = LunrConfig.from_storage_conf()
    # If no time provided, use current time
    timestamp = timestamp or time()
    # Init our helpers
    volume = VolumeHelper(conf)
    backup = BackupHelper(conf)
    try:
        # Create the snapshot
        snapshot = volume.create_snapshot(src, id, timestamp)
        # For testing non-snapshot speeds
        # snapshot = volume.get(src)
        # snapshot['backup_id'] = id
        # snapshot['origin'] = src
        # snapshot['timestamp'] = 1338410885.0
        # del snapshot['volume']
        print("Created snap-shot: ", pprint(snapshot))
        with self.timeit(snapshot['size']):
            # Backup the snapshot
            print("Starting Backup")
            backup.save(snapshot, id)
    finally:
        # Delete the snapshot if it was created; the locals() check
        # distinguishes "create_snapshot raised" from "backup failed".
        if 'snapshot' in locals():
            self._remove_volume(snapshot['path'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ip(request):
    """Return the IP address found in the HTTP_X_FORWARDED_FOR header of
    *request*.

    The return value can be overridden via the ``LOCAL_GEOLOCATION_IP``
    setting. Local addresses (starting with ``10.`` or equal to
    ``127.0.0.1``) are skipped.
    """
    if getsetting('LOCAL_GEOLOCATION_IP'):
        return getsetting('LOCAL_GEOLOCATION_IP')
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if not forwarded_for:
        return UNKNOWN_IP
    # Walk the proxy chain and return the first non-local address.
    for candidate in forwarded_for.split(','):
        candidate = candidate.strip()
        if candidate != '127.0.0.1' and not candidate.startswith('10.'):
            return candidate
    return UNKNOWN_IP
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_connection(self):
    """Return a valid redis connection based on the following settings * `REDIS_CONNECTIONS` * `EVENTLIB_REDIS_CONFIG_NAME` The first one is a dictionary in the following format: The second one is the name of the entry present in the above dict, like `server1` or `server2`. """
    # Reuse a previously established connection if present.
    if self.conn:
        return self.conn
    redis_configs = getsetting('REDIS_CONNECTIONS')
    if redis_configs:
        # Pick the named entry (defaulting to 'default') from the
        # connection settings dict.
        config_name = getsetting('EVENTLIB_REDIS_CONFIG_NAME', 'default')
        config = redis_configs[config_name]
        host = config['HOST']
        port = config['PORT']
        self.conn = redis.StrictRedis(host=host, port=port)
    else:
        # No redis configured: remember (and return) None.
        self.conn = None
    return self.conn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run_setup_py(self, args, echo=True, echo2=True, ff=''):
    """Run setup.py with monkey-patched setuptools. The patch forces setuptools to use the file-finder 'ff'. If 'ff' is the empty string, the patch is not applied. 'args' is the list of arguments that should be passed to setup.py. """
    # CAUTION: both command strings below interpolate from locals();
    # renaming any local variable here (python, setup_py, ff, args) would
    # silently break the %(...)s keys in RUN_SETUP and the popen command.
    python = self.python
    if ff:
        # RUN_SETUP is a module-level template presumably consuming
        # ff/args via locals() — confirm before editing.
        setup_py = '-c"%s"' % (RUN_SETUP % locals())
    else:
        setup_py = 'setup.py %s' % ' '.join(args)
    rc, lines = self.process.popen(
        '"%(python)s" %(setup_py)s' % locals(), echo=echo, echo2=echo2)
    return rc, lines
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def app_factory(global_settings, **local_settings):
    # pylint: disable=W0613
    """ Default factory for creating a WSGI application using the everest configurator and root factory. :param dict global_settings: Global settings extracted from an ini file. Not used in this default app factory. :param dict local_settings: App settings extracted from an ini file. """
    # NOTE(review): despite the docstring, global_settings IS read below
    # (its '__file__' entry) to locate the ini file.
    config = Configurator()
    config.setup_registry(settings=local_settings,
                          root_factory=RootFactory())
    if 'configure_zcml' in local_settings:
        config.load_zcml(local_settings['configure_zcml'])
    app = config.make_wsgi_app()
    # In the absence of an application name in the settings, we have to
    # extract the main app's name from the ini file, which unfortunately
    # means parsing it again.
    app_name = app_name_from_ini_file(global_settings['__file__'])
    ep_group = "%s.plugins" % app_name
    # Load every plugin registered under the "<app name>.plugins"
    # entry-point group.
    plugin_mgr = config.get_registered_utility(IPluginManager)
    plugin_mgr.load_all(ep_group)
    return app
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def fetch_page(session, host):
    """ Perform the page fetch from an individual host. `session` - An aiohttp [client session](http://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session) `host` - URL to fetch `return` tuple with the following: * The host parameter * A vague status string * Text response or an exception depending on status above """
    # Stagger request starts by up to 2.5 s to avoid a thundering herd.
    await asyncio.sleep(random.randint(0, 25) * 0.1)
    start = time.time()
    logger.info('Fetch from {}'.format(host))
    try:
        response = await session.get(host, allow_redirects=False)
    except aiohttp.ClientResponseError as err:
        # likely a 404 implying HTTP but no page
        # likely a 401 implying HTTP but no access
        # FIXME: for instance, a gateway
        # headers are available via err.headers()
        # https://multidict.readthedocs.io/en/stable/multidict.html#multidict.CIMultiDict
        results_tuple = (host, 'no page', err)
    except aiohttp.ClientConnectorError as err:
        # likely device at IP but no HTTP server
        results_tuple = (host, 'no http', err)
    except aiohttp.ServerConnectionError as err:
        # likely ServerTimeoutError implying no device at IP
        results_tuple = (host, 'no dev', err)
    except aiohttp.InvalidURL as err:
        # likely a malformed URL
        results_tuple = (host, 'no URL', err)
    # except Exception as err:
    #     # Generic trap for debug
    #     results_tuple = (host, 'unknown', err)
    else:
        try:
            text_response = await response.text()
        except aiohttp.ClientPayloadError as err:
            # trouble reading page TODO: anyway to recover?
            results_tuple = (host, 'no read', err)
        else:
            results_tuple = (host, 'found', text_response)
        # Release the connection back to the pool.
        response.close()
    logger.info('Recvd from {} after {:.2f}s'.format(host, time.time() - start))
    return results_tuple
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def asynchronous(urls=None, re_filter=None):
    """ Asynchronous request manager for session. Returns list of responses that match the filter. `urls` - tuple of URLs to request `re_filter` - a compiled regular expression [object](https://docs.python.org/3/library/re.html#re-objects) """
    class _URLBase(str):
        """ Convenient access to hostname (ip) portion of the URL """
        @property
        def hostname(self):
            return urlsplit(self).hostname
    http_devices = {}
    qualified_devices = []
    # limit=0 removes the connector's simultaneous-connection cap.
    connection = aiohttp.TCPConnector(limit=0)
    async with aiohttp.ClientSession(connector=connection,
                                     conn_timeout=5,
                                     raise_for_status=True) as session:
        futures = [fetch_page(session, url) for url in urls]
        # Consume results as they complete rather than in request order.
        for future in asyncio.as_completed(futures):
            response = await future
            if 'found' in response[1]:
                http_devices[response[0]] = response[2]
                logger.debug('Processed %s', response[0])
                # Keep only hosts whose body matches the caller's filter.
                if re_filter.search(response[2]):
                    qualified_devices.append(_URLBase(response[0]))
    # print('The following responded to HTTP:')
    # for x in http_devices.keys():
    #     print(x)
    return qualified_devices
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url_generator(network=None, path=''):
    """Yield one URL with *path* for each host on *network*.

    `network` - IP address and subnet mask compatible with the
    [ipaddress library](https://docs.python.org/3/library/ipaddress.html#ipaddress.ip_network)
    `path` - Path portion of a URL as defined by
    [url(un)split](https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlsplit)
    """
    net = ipaddress.ip_network(network)
    if net.num_addresses > 256:
        # Larger scans would need batching; otherwise we run out of
        # selectors.
        logger.error('Scan limited to 256 addresses, requested %d.', net.num_addresses)
        raise NotImplementedError
    if net.num_addresses > 1:
        # Async-request up to 256 hosts on the network.
        hosts = net.hosts()
    else:
        # Assume user intent was a single IP address.
        hosts = [net.network_address]
    return (urlunsplit(('http', str(host), path, '', '')) for host in hosts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def survey(network=None, path='', pattern='', log=False):
    """ Search network for hosts with a response to path that matches pattern `network` - IP address and subnet mask compatible with [ipaddress library](https://docs.python.org/3/library/ipaddress.html#ipaddress.ip_network) `path` - Path portion of a URL as defined by [url(un)split](https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlsplit) `pattern` - A regular expression pattern compatible with [re.compile](https://docs.python.org/3/library/re.html#re.compile) `log` - boolean to control logging level """
    # Verbose logging only when requested; near-silent otherwise.
    if log:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.CRITICAL)
    network_scan = asyncio.ensure_future(asynchronous(
        urls=url_generator(network=network, path=path),
        re_filter=re.compile(pattern))
    )
    ioloop = asyncio.get_event_loop()
    ioloop.run_until_complete(network_scan)
    # Zero-sleep to allow underlying connections to close
    # http://aiohttp.readthedocs.io/en/stable/client_advanced.html#graceful-shutdown
    ioloop.run_until_complete(asyncio.sleep(0))
    # ioloop.close() # don't close the loop, so it's available for re-use
    # https://stackoverflow.com/questions/45010178/how-to-use-asyncio-event-loop-in-library-function
    # Sort results numerically by IP address rather than lexically.
    return sorted(network_scan.result(), key=lambda x: ipaddress.ip_address(x.hostname))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hidden_cursor():
    """Temporarily hide the terminal cursor."""
    # Generator-based context manager body (the @contextmanager decorator
    # is applied where this is defined — not visible in this chunk).
    # Only emit escape codes when attached to a real terminal.
    if sys.stdout.isatty():
        _LOGGER.debug('Hiding cursor.')
        print('\x1B[?25l', end='')  # ANSI DECTCEM "hide cursor"
        sys.stdout.flush()
    try:
        yield
    finally:
        # Always restore the cursor, even if the wrapped block raised.
        if sys.stdout.isatty():
            _LOGGER.debug('Showing cursor.')
            print('\n\x1B[?25h', end='')  # ANSI DECTCEM "show cursor"
            sys.stdout.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def display_status():
    """Display an OK or FAILED message for the context block."""
    # Generator-based context manager body (decorator not visible here).
    def print_status(msg, color):
        """Print the status message.
        Args:
            msg: The message to display (e.g. OK or FAILED).
            color: The ANSI color code to use in displaying the message.
        """
        # Carriage return on a TTY overwrites the current line; a tab is
        # used when output is piped/redirected.
        print('\r' if sys.stdout.isatty() else '\t', end='')
        print('{}{}[{color}{msg}{}]{}'.format(
            Cursor.FORWARD(_ncols() - 8),
            Style.BRIGHT,
            Fore.RESET,
            Style.RESET_ALL,
            color=color,
            msg=msg[:6].upper().center(6)
        ))
        sys.stdout.flush()
    try:
        yield
    except Status as e:
        # A Status exception carries its own message/color, and may wrap
        # another exception to re-raise after printing.
        _LOGGER.debug(e)
        print_status(e.msg, e.color)
        if e.exc:
            raise e.exc  # pylint: disable=raising-bad-type
    except (KeyboardInterrupt, EOFError):
        # Let user interrupts propagate without printing a status.
        raise
    except Exception:
        print_status('FAILED', Fore.RED)
        raise
    else:
        print_status('OK', Fore.GREEN)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pusher_connect_handler(self, data):
"""Event handler for the connection_established event. Binds the shortlink_scanned event """ |
self.channel = self.pusher.subscribe(self.pos_callback_chan)
for listener in self.pusher_connected_listeners:
listener(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _runForever(self, stop_event):
"""Runs the main loop Arguments: stop_event: threading.Event() as a stop signal """ |
while(not stop_event.is_set()):
state = self.pusher.connection.state
if (state is not "connecting" and
state is not "connected"):
self.logger.warning(
"Pusher seems to be disconnected, trying to reconnect")
self.pusher.connect()
stop_event.wait(0.5) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
    """Stops the pusherclient cleanly.

    Signals the connector thread to stop, disconnects, and waits for the
    connection to actually leave the "connected" state before returning.
    """
    self.pusherthread_stop.set()
    self.pusher.disconnect()
    # BUG FIX: the original compared the state string with `is`, which
    # tests identity, not equality; use == for a reliable comparison.
    # wait until pusher is down
    while self.pusher.connection.state == "connected":
        sleep(0.1)
    logging.info("shutting down pusher connector thread")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, filename, bs=512):
    """Loads GPT partition table.

    Args:
        filename (str): path to file or device to open for reading
        bs (uint): Block size of the volume, default: 512

    Raises:
        IOError: If file does not exist or not readable
    """
    with open(filename, 'rb') as f:
        # The header's own size lives at offset 0x0C within the header.
        f.seek(GPT_HEADER_OFFSET + 0x0C)
        header_size = struct.unpack("<I", f.read(4))[0]
        # Re-read the complete header now that its size is known.
        f.seek(GPT_HEADER_OFFSET)
        header_data = f.read(header_size)
        self.header = GPT_HEADER(header_data)
        if (self.header.signature != GPT_SIGNATURE):
            raise Exception("Invalid GPT signature")
        # Parse the partition entry array that follows the header.
        self.__load_partition_entries(f, bs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def global_to_local(self, index):
    """ Calculate local index from global index :param index: input index :return: local index for data """
    if (type(index) is int) or (type(index) is slice):
        # Scalar/slice indexing is only valid for 1-D masks.
        if len(self.__mask) > 1:
            raise IndexError('check length of parameter index')
        # 1D array
        if type(index) is int:
            return self.int_global_to_local(index)
        elif type(index) is slice:
            return self.slice_global_to_local(index)
        else:
            raise IndexError('check data type of index to be integer or slice')
    elif type(index) is tuple:
        #if len(index) is not len(self.__mask):
        #    raise IndexError('check length of parameter index')
        local_index = []
        for k, item in enumerate(index):
            if k < len(self.__mask):
                # Translate each masked axis; extra axes (beyond the
                # mask) pass through unchanged.
                if type(item) is slice:
                    temp_index = self.slice_global_to_local(item, k)
                elif type(item) in [int, np.int64, np.int32]:
                    temp_index = self.int_global_to_local(item, k)
                # NOTE(review): if item is neither a slice nor a
                # recognized int type, temp_index from a previous
                # iteration is reused here — confirm this is intended.
                if temp_index is None:
                    # Axis index falls outside this rank's region.
                    return temp_index
            else:
                temp_index = item
            local_index.append(temp_index)
        return tuple(local_index)
    else:
        raise IndexError('check index for correct length and type')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_global_to_local_start(self, index, axis=0):
""" Calculate local index from global index from start_index :param index: global index as integer :param axis: current axis to process :return: """ |
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_global_to_local_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index :param index: global index as integer :param axis: current axis to process :return: """ |
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_global_to_local(self, index, axis=0):
""" Calculate local index from global index for integer input :param index: global index as integer :param axis: current axis to process :return: """ |
# NOTE(review): why >= here? Strictly ">" should be sufficient -- needs a
# test! (translated from the original German comment)
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
    return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
    # index falls into a halo region owned by a neighbouring rank
    return None
return index-self.__mask[axis].start
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_out_of_bounds(self, index, axis=0):
    """ Check whether an integer index is outside the local processing bounds. This function performs the check for an index of type integer. :param index: global index to check, as type int :param axis: current axis to check :return: the (possibly wrapped) input index, or raise IndexError """ |
# NOTE(review): ">" permits index == shape[axis]; the commented-out ">="
# variant suggests this boundary is still undecided -- confirm intent.
#if index >= self._global_shape[axis]:
if index > self._global_shape[axis]:
    raise IndexError('index is larger than the upper bound')
# wrap around index if negative like in python
if index < 0:
    index += self._global_shape[axis]
    #warnings.warn('wrap around may occur')
# check for invalid wrap around (still negative after one wrap)
if index < 0:
    raise IndexError('index is smaller than the lower bound')
return index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def out_of_bounds(self, index):
""" Check index for out of bounds :param index: index as integer, tuple or slice :return: local index as tuple """ |
if type(index) is int:
return self.int_out_of_bounds(index)
elif type(index) is slice:
return self.slice_out_of_bounds(index)
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if type(item) is slice:
temp_index = self.slice_out_of_bounds(item, k)
elif type(item) is int:
temp_index = self.int_out_of_bounds(item, k)
# FIXME: will fail if item is no int or slice!
if temp_index is None:
return temp_index
local_index.append(temp_index)
return tuple(local_index) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_server_setting(self, protocol, host='127.0.0.1', port=8000,
                        debug=False, ssl=None, sock=None, workers=1,
                        loop=None, backlog=100, has_log=True):
    '''Helper function used by `run`.

    Builds the settings dict handed to the low-level server: normalises
    `ssl` (a dict of cert/key paths becomes an SSLContext) and wires the
    registered start/stop listeners into the settings.
    '''
    if isinstance(ssl, dict):
        # try common aliases
        cert = ssl.get('cert') or ssl.get('certificate')
        key = ssl.get('key') or ssl.get('keyfile')
        if cert is None or key is None:
            raise ValueError('SSLContext or certificate and key required.')
        context = create_default_context(purpose=Purpose.CLIENT_AUTH)
        context.load_cert_chain(cert, keyfile=key)
        ssl = context
    server_settings = {
        'protocol': protocol,
        'request_handler': self.request_handler,
        'log': self.log,
        'netlog': self.netlog,
        'host': host,
        'port': port,
        'sock': sock,
        'ssl': ssl,
        'signal': self.signal,
        'debug': debug,
        'request_timeout': self.request_timeout,
        'request_max_size': self.request_max_size,
        'keep_alive': self.keep_alive,
        'loop': loop,
        'backlog': backlog,
        'has_log': has_log
    }
    for event_name, settings_name, reverse in (
        ('before_server_start', 'before_start', False),
        ('after_server_start', 'after_start', False),
        ('before_server_stop', 'before_stop', True),
        ('after_server_stop', 'after_stop', True),
    ):
        listeners = self.listeners[event_name].copy()
        if reverse:
            # stop-side listeners run in reverse registration order (LIFO)
            listeners.reverse()
        # Prepend mach9 to the arguments when listeners are triggered
        listeners = [partial(listener, self.app) for listener in listeners]
        server_settings[settings_name] = listeners
    if debug:
        self.log.setLevel(logging.DEBUG)
    # Serve
    if host and port:
        proto = 'http'
        if ssl is not None:
            proto = 'https'
        self.log.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))
    return server_settings
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify(path):
"""Verify that `path` has the qpimage series file format""" |
valid = False
try:
h5 = h5py.File(path, mode="r")
qpi0 = h5["qpi_0"]
except (OSError, KeyError):
pass
else:
if ("qpimage version" in qpi0.attrs and
"phase" in qpi0 and
"amplitude" in qpi0 and
"bg_data" in qpi0["phase"] and
"bg_data" in qpi0["amplitude"]):
valid = True
return valid |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_requirements(output_path=None):
""" Loop through the INSTALLED_APPS and create a set of requirements for pip. if output_path is ``None`` then write to standard out, otherwise write to the path. """ |
from django.conf import settings
# Union the pip requirements of every installed app we know about.
reqs = set()
for app in settings.INSTALLED_APPS:
    if app in mapping:  # membership on the dict == mapping.keys()
        reqs |= set(mapping[app])
index_line = "--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/"
if output_path is None:
    # parenthesized prints work identically for single args on py2 and py3
    print(index_line)
    for item in reqs:
        print(item)
else:
    # open() via `with`: the original opened inside try/finally, so a
    # failed open() left out_file unbound and the finally raised NameError
    with open(output_path, 'w') as out_file:
        out_file.write(index_line + "\n")
        for item in reqs:
            out_file.write("%s\n" % item)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_mbr_plugin(self, fs_id, plugin):
"""Used in plugin's registration routine, to associate it's detection method with given filesystem id Args: fs_id: filesystem id that is read from MBR partition entry plugin: plugin that supports this filesystem """ |
# Multiple plugins may claim the same MBR filesystem id; keep them all
# (the registry value is a list, appended to per registration).
self.logger.debug('MBR: {}, FS ID: {}'
                  .format(self.__get_plugin_name(plugin), fs_id))
self.__mbr_plugins[fs_id].append(plugin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_gpt_plugin(self, fs_guid, plugin):
"""Used in plugin's registration routine, to associate it's detection method with given filesystem guid Args: fs_guid: filesystem guid that is read from GPT partition entry plugin: plugin that supports this filesystem """ |
# Normalise the GUID string to a uuid.UUID (lower-cased) so registry keys
# are independent of the caller's string formatting/case.
key = uuid.UUID(fs_guid.lower())
self.logger.debug('GPT: {}, GUID: {}'
                  .format(self.__get_plugin_name(plugin), fs_guid))
self.__gpt_plugins[key].append(plugin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_mbr(self, filename, offset, fs_id):
"""Used by rawdisk.session.Session to match mbr partitions against filesystem plugins. Args: filename: device or file that it will read in order to detect the filesystem fs_id: filesystem id to match (ex. 0x07) offset: offset for the filesystem that is being matched Returns: Volume object supplied by matched plugin. If there is no match, None is returned """ |
self.logger.debug('Detecting MBR partition type')
if fs_id not in self.__mbr_plugins:
return None
else:
plugins = self.__mbr_plugins.get(fs_id)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_gpt(self, filename, offset, fs_guid):
    """Used by rawdisk.session.Session to match gpt partitions against filesystem plugins. Args: filename: device or file that it will read in order to detect the filesystem fs_guid: filesystem guid to match (ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7}) offset: offset for the filesystem that is being matched Returns: Volume object supplied by matched plugin. If there is no match, None is returned """ |
self.logger.debug('Detecting GPT partition type')
if fs_guid not in self.__gpt_plugins:
return None
else:
plugins = self.__gpt_plugins.get(fs_guid)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inject_documentation(**options):
""" Generate configuration documentation in reStructuredText_ syntax. :param options: Any keyword arguments are passed on to the :class:`ConfigLoader` initializer. This methods injects the generated documentation into the output generated by cog_. .. _cog: https://pypi.python.org/pypi/cogapp """ |
# cog is only needed at documentation-generation time; import locally.
import cog
loader = ConfigLoader(**options)
# surrounding newlines keep the injected reST clear of the cog markers
cog.out("\n" + loader.documentation + "\n\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_file(self, filename):
""" Read a text file and provide feedback to the user. :param filename: The pathname of the file to read (a string). :returns: The contents of the file (a string). """ |
# Log before and after so slow/remote context reads are traceable.
logger.info("Reading file: %s", format_path(filename))
contents = self.context.read_file(filename)
num_lines = len(contents.splitlines())
logger.debug("Read %s from %s.",
             pluralize(num_lines, 'line'),
             format_path(filename))
# strip the trailing newline/whitespace for callers
return contents.rstrip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute_file(self, filename):
""" Execute a file and provide feedback to the user. :param filename: The pathname of the file to execute (a string). :returns: Whatever the executed file returns on stdout (a string). """ |
logger.info("Executing file: %s", format_path(filename))
contents = self.context.execute(filename, capture=True).stdout
num_lines = len(contents.splitlines())
logger.debug("Execution of %s yielded % of output.",
format_path(filename),
pluralize(num_lines, 'line'))
return contents.rstrip() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_file(self, filename, contents):
""" Write a text file and provide feedback to the user. :param filename: The pathname of the file to write (a string). :param contents: The new contents of the file (a string). """ |
logger.info("Writing file: %s", format_path(filename))
# contents is documented as text (a string); appending bytes b"\n" would
# raise TypeError, so terminate with a str newline instead
contents = contents.rstrip() + "\n"
self.context.write_file(filename, contents)
logger.debug("Wrote %s to %s.",
             pluralize(len(contents.splitlines()), "line"),
             format_path(filename))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_input(function):
"""Decorator that validates the kwargs of the function passed to it.""" |
@wraps(function)
def wrapper(*args, **kwargs):
try:
name = function.__name__ + '_validator' # find validator name
globals()[name](kwargs) # call validation function
return function(*args, **kwargs)
except KeyError:
raise Exception("Could not find validation schema for the"
" function " + function.__name__)
return wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def getModulePath(project_path,module_name,verbose):
    '''Searches for module_name in searchpath and returns the filepath.
    If no filepath was found, returns None.'''
    if not module_name:
        return None
    # make project-local modules importable for pkgutil
    sys.path.append(project_path)
    try:
        package = pkgutil.get_loader(module_name)
    except ImportError:
        if verbose:
            print("Parent module for "+module_name+" not found.")
        return None
    except:
        if verbose:
            print(module_name+" not loaded for bizarre reasons")
        # NOTE(review): no return here -- control falls through to the try
        # below with `package` unbound; the bare except there turns the
        # resulting NameError into a None return. Confirm this is intended.
    try:
        if package:
            if package.get_code(module_name):
                filename = package.get_code(module_name).co_filename
                return filename
            elif package.find_spec(module_name).has_location==False:
                return None #built-in module such as itertools
            else:
                pass #perhaps filename is in package.find_spec(module_name).origin?
            pass #a good reference is https://www.python.org/dev/peps/pep-0302/
    except ImportError:
        if verbose:
            print("Code object unavailable for "+module_name)
        return None
    except AttributeError:
        if verbose:
            print(module_name+" is an ExtensionFileLoader object")
        return None
    except:
        if verbose:
            print(module_name+" not loaded for bizarre reasons")
        return None
    else:
        # reached when the try body completed without returning,
        # i.e. `package` was falsy: the module was not found
        if verbose:
            print ("Module "+module_name+" not found.")
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def getImportFromObjects(node):
    '''Return the names bound in the importing namespace by an ImportFrom
    node: aliased imports contribute their alias (asname), plain imports
    contribute the imported name itself.'''
    somenames = [x.asname for x in node.names if x.asname]
    othernames = [x.name for x in node.names if not x.asname]
    return somenames+othernames
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_slug_expression(attr):
""" Converts the given instrumented string attribute into an SQL expression that can be used as a slug. Slugs are identifiers for members in a collection that can be used in an URL. We create slug columns by replacing non-URL characters with dashes and lower casing the result. We need this at the ORM level so that we can use the slug in a query expression. """ |
slug_expr = sa_func.replace(attr, ' ', '-')
slug_expr = sa_func.replace(slug_expr, '_', '-')
slug_expr = sa_func.lower(slug_expr)
return slug_expr |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mapper(class_, local_table=None, id_attribute='id', slug_expression=None, *args, **kwargs):
""" Convenience wrapper around the SA mapper which will set up the hybrid "id" and "slug" attributes required by everest after calling the SA mapper. If you (e.g., for testing purposes) want to clear mappers created with this function, use the :func:`clear_mappers` function in this module. :param str id_attribute: the name of the column in the table to use as ID column (will be aliased to a new "id" attribute in the mapped class) :param slug_expression: function to generate a slug SQL expression given the mapped class as argument. """ |
mpr = sa_mapper(class_, local_table=local_table, *args, **kwargs)
# Set up the ID attribute as a hybrid property, if necessary.
if id_attribute != 'id':
    # Make sure we are not overwriting an already mapped or customized
    # 'id' attribute.
    if 'id' in mpr.columns:
        mpr.dispose()
        raise ValueError('Attempting to overwrite the mapped "id" '
                         'attribute.')
    elif isdatadescriptor(getattr(class_, 'id', None)):
        mpr.dispose()
        raise ValueError('Attempting to overwrite the custom data '
                         'descriptor defined for the "id" attribute.')
    class_.id = synonym(id_attribute)
# If this is a polymorphic class, a base class may already have a
# hybrid descriptor set as slug attribute.
slug_descr = None
for base_cls in class_.__mro__:
    try:
        slug_descr = object.__getattribute__(base_cls, 'slug')
    except AttributeError:
        pass
    else:
        break
if isinstance(slug_descr, hybrid_descriptor):
    if not slug_expression is None:
        raise ValueError('Attempting to overwrite the expression for '
                         'an inherited slug hybrid descriptor.')
    hyb_descr = slug_descr
else:
    # Set up the slug attribute as a hybrid property.
    if slug_expression is None:
        # default slug expression: the stringified ID column
        cls_expr = lambda cls: cast(getattr(cls, 'id'), String)
    else:
        cls_expr = slug_expression
    # NOTE(review): slug_descr may be None (or a plain attribute) here and
    # is passed as the descriptor's getter -- confirm hybrid_descriptor
    # accepts that.
    hyb_descr = hybrid_descriptor(slug_descr, expr=cls_expr)
class_.slug = hyb_descr
return mpr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def synonym(name):
""" Utility function mimicking the behavior of the old SA synonym function with the new hybrid property semantics. """ |
# Getter/setter delegate to the named attribute on instances; the expr
# yields the mapped class attribute so the synonym works in queries.
return hybrid_property(lambda inst: getattr(inst, name),
                       lambda inst, value: setattr(inst, name, value),
                       expr=lambda cls: getattr(cls, name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_system_entities(engine, metadata, reset):
""" Maps all system entities. """ |
# Map the user message system entity.
msg_tbl = Table('_user_messages', metadata,
                Column('guid', String, nullable=False, primary_key=True),
                Column('text', String, nullable=False),
                # server-side default timestamp via NOW()
                Column('time_stamp', DateTime(timezone=True),
                       nullable=False, default=sa_func.now()),
                )
# GUID is the primary key; alias it to the standard "id" attribute.
mapper(UserMessage, msg_tbl, id_attribute='guid')
if reset:
    # drop-and-recreate only the system table, leaving app tables alone
    metadata.drop_all(bind=engine, tables=[msg_tbl])
metadata.create_all(bind=engine, tables=[msg_tbl])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def schematron(self, fn=None, outfn=None, ext='.sch'):
"""convert the Schema to schematron and save at the given output filename or with the given extension.""" |
from .xslt import XSLT
from . import PATH, XML, etree
fn = fn or self.fn
# already schematron? nothing to convert
if os.path.splitext(fn)[-1].lower()==ext:
    return fn
elif os.path.splitext(fn)[-1].lower()!='.rng':
    # non-RNG input (e.g. .rnc compact syntax): convert via trang first
    fn = Schema(fn=fn).trang(ext='.rng')
# transform the RELAX NG grammar to schematron with the bundled XSLT
rng2sch = XSLT(fn=os.path.join(PATH, 'xslts', 'rng2sch.xslt'))
rng = XML(fn=fn)
outfn = outfn or os.path.splitext(fn)[0]+ext
sch = XML(fn=outfn, root=rng2sch.saxon9(rng.root).getroot())
sch.write()
return sch.fn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xhtml(self, outfn=None, ext='.xhtml', css=None, **params):
"""convert the Schema to XHTML with the given output filename or with the given extension.""" |
from markdown import markdown
from copy import deepcopy
from bl.file import File
from .xslt import XSLT
from .rng import RNG
from . import XML, PATH, etree
# sibling filenames derived from this schema's own path
rncfn = os.path.splitext(self.fn)[0] + '.rnc'
rngfn = os.path.splitext(self.fn)[0] + '.rng'
htmlfn = os.path.splitext(self.fn)[0] + '.html'
if self.fn==rncfn or os.path.exists(rncfn):
    # compact syntax source present: (re)generate the .rng via trang
    rngfn = Schema(rncfn).trang(ext='.rng')
assert os.path.exists(rngfn)
# convert all <r:define> elements into a <a:definition> blocks containing a compact syntax alternative
rng = RNG(fn=rngfn)
for define in rng.xpath(rng.root, "//r:define"):
    log.debug("%s %r" % (rng.tag_name(define), define.attrib))
    # build a throwaway grammar holding just this define so trang can
    # render its compact-syntax equivalent in isolation
    tempdefine = deepcopy(define)
    tempgrammar = deepcopy(rng.root); tempgrammar.text = '\n'
    for ch in tempgrammar.getchildren(): rng.remove(ch)
    tempgrammar.insert(0, tempdefine)
    # strip existing annotations so they don't appear in the rnc output
    for adoc in rng.xpath(tempdefine, ".//a:documentation | .//a:definition"):
        rng.remove(adoc)
    with tempfile.TemporaryDirectory() as tempdir:
        x = XML(fn=os.path.join(tempdir, 'define.rng'), root=tempgrammar)
        x.write()
        newfn = Schema(x.fn).trang(ext='.rnc')
        txt = open(newfn, 'rb').read().decode('utf-8')
        if '\n\n' in txt:
            # drop the grammar preamble; keep only the define body
            txt = txt[txt.index('\n\n')+1:].strip()
    adef = etree.Element("{%(a)s}definition" % RNG.NS)
    adef.text = txt
    adef.tail = '\n\t\t'
    log.debug(adef.text)
    # insert the compact-syntax block after any existing annotations
    annotations = rng.xpath(define, "a:*")
    if len(annotations) > 0:
        index = define.index(annotations[-1])+1
    else:
        index = 0
    define.insert(index, adef)
rng.write()
# RNG -> markdown -> XHTML body
xslt = XSLT(fn=os.path.join(PATH, 'xslts', 'rng2md.xslt'))
md = xslt.saxon9(rng.root, **params).strip()
html_body = markdown(md,
    output_format="xhtml5",
    extensions=[ # see https://python-markdown.github.io/extensions/
        'markdown.extensions.extra',
        'markdown.extensions.admonition',
        'markdown.extensions.headerid',
        'markdown.extensions.sane_lists',
        'markdown.extensions.toc']).strip()
html_text = """<html><head><meta charset="UTF-8"/><style type="text/css">
body {font-family:sans-serif;line-height:1.3}
h1,h2,h3 {margin:1em 0 .25em 0}
h1 {font-size:2rem;font-weight:normal;}
h2 {font-size:1.2rem;font-weight:bold;}
h3 {font-size:1.15rem;font-weight:normal;font-style:italic;}
p {margin:0 0 .5rem 0;}
p.subtitle {font-size:1.2rem;font-family:sans-serif;margin-bottom:1em}
p.code {font-family:monospace;font-size:.6rem;color:#666;line-height:1.1}
pre {font-family:monospace;font-size:.6rem;color:#666;line-height:1.1;margin-left:1.5rem;}
hr {border:0;border-top:1px solid #999;margin:1rem 0;}
</style></head><body>\n""" + html_body + """\n</body></html>"""
html = XML(fn=htmlfn, root=html_text)
return html
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_tag(cls, tag, schemas, ext='.rnc'):
"""load a schema using an element's tag. schemas can be a string or a list of strings""" |
return cls(fn=cls.filename(tag, schemas, ext=ext)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filename(cls, tag, schemas, ext='.rnc'):
"""given a tag and a list of schemas, return the filename of the schema. If schemas is a string, treat it as a comma-separated list. """ |
if type(schemas)==str:
schemas = re.split("\s*,\s*", schemas)
for schema in schemas:
fn = os.path.join(schema, cls.dirname(tag), cls.basename(tag, ext=ext))
if os.path.exists(fn):
return fn |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def errors_as_text(self):
""" only available to Django 1.7+ """ |
# Collect non-field errors first, then one "label message" entry per
# field; as_data() is what requires Django 1.7+.
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
    field_label = self.fields[key].label
    err_descn = ''.join([force_text(e.message) for e in value])
    error = "%s %s" % (field_label, err_descn)
    errors.append(error)
return ','.join(errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
""" add attr to fields """ |
# Append attr_val to each selected field's widget attribute,
# space-separating it from any existing value.
for f in self.filter_fields(fields, exclude, include_all_if_empty):
    f = self.fields[f.name]
    org_val = f.widget.attrs.get(attr_name, '')
    f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
""" add class to html widgets. """ |
self.add_attr2fields('class', html_class, fields, exclude) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_required_fields(self, fields=[]):
""" set required to True """ |
# Mark every selected field as mandatory.
for selected in self.filter_fields(fields):
    self.fields[selected.name].required = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
""" check if this object is unique """ |
# Exclude self.instance so editing an existing record does not collide
# with itself; any other match means the value is taken.
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
    raise forms.ValidationError(error_msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_info(pyfile):
'''Retrieve dunder values from a pyfile'''
info = {}
info_re = re.compile(r"^__(\w+)__ = ['\"](.*)['\"]")
with open(pyfile, 'r') as f:
for line in f.readlines():
match = info_re.search(line)
if match:
info[match.group(1)] = match.group(2)
return info |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Entrypoint for ``lander`` executable.""" |
# Parse CLI args and configure structured logging before anything else.
args = parse_args()
config_logger(args)
logger = structlog.get_logger(__name__)
if args.show_version:
    # only print the version
    print_version()
    sys.exit(0)
version = pkg_resources.get_distribution('lander').version
logger.info('Lander version {0}'.format(version))
config = Configuration(args=args)
# disable any build confirmed to be a PR with Travis
if config['is_travis_pull_request']:
    logger.info('Skipping build from PR.')
    sys.exit(0)
lander = Lander(config)
lander.build_site()
logger.info('Build complete')
# uploading is opt-in via configuration
if config['upload']:
    lander.upload_site()
    logger.info('Upload complete')
logger.info('Lander complete')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert_node(self, node):
""" Adds node if name is available or pre-existing node returns True if added returns False if not added """ |
if self._is_node_reserved(node):
return False
# Put node in map
self._node_map[node.get_id()] = node
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join(self, distbase, location):
"""Join 'distbase' and 'location' in such way that the result is a valid scp destination. """ |
sep = ''
if distbase and distbase[-1] not in (':', '/'):
sep = '/'
return distbase + sep + location |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_location(self, location, depth=0):
"""Resolve aliases and apply distbase. """ |
if not location:
    return []
if location in self.aliases:
    # aliases may reference other aliases; recurse with a depth guard
    res = []
    if depth > MAXALIASDEPTH:
        err_exit('Maximum alias depth exceeded: %(location)s' % locals())
    for loc in self.aliases[location]:
        res.extend(self.get_location(loc, depth+1))
    return res
if self.is_server(location):
    return [location]
if location == 'pypi':
    # "pypi" without server config cannot be used as a target
    err_exit('No configuration found for server: pypi\n'
             'Please create a ~/.pypirc file')
if self.urlparser.is_url(location):
    return [location]
# bare names (no host part) are interpreted relative to distbase
if not self.has_host(location) and self.distbase:
    return [self.join(self.distbase, location)]
return [location]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_default_location(self):
"""Return the default location. """ |
res = []
for location in self.distdefault:
res.extend(self.get_location(location))
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_empty_locations(self, locations=None):
"""Fail if 'locations' is empty. """ |
# Default to the instance's locations when none are passed explicitly.
locs = self.locations if locations is None else locations
if not locs:
    err_exit('mkrelease: option -d is required\n%s' % USAGE)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_valid_locations(self, locations=None):
"""Fail if 'locations' contains bad destinations. """ |
# Every destination must be a configured server, an ssh URL, or carry an
# explicit host part; anything else is rejected.
if locations is None:
    locations = self.locations
for loc in locations:
    recognised = (self.is_server(loc) or
                  self.is_ssh_url(loc) or
                  self.has_host(loc))
    if not recognised:
        err_exit('Unknown location: %(location)s' % {'location': loc})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_locations(self):
"""Print known dist-locations and exit. """ |
known = self.defaults.get_known_locations()
# make sure configured defaults show up even if not otherwise known
for default in self.defaults.distdefault:
    if default not in known:
        known.add(default)
if not known:
    err_exit('No locations', 0)
for location in sorted(known):
    if location in self.defaults.distdefault:
        # flag configured defaults in the listing
        print(location, '(default)')
    else:
        print(location)
sys.exit(0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_uploadflags(self, location):
"""Return uploadflags for the given server. """ |
# Precedence for both sign and identity: command line > per-server
# configuration > global defaults.
uploadflags = []
server = self.defaults.servers[location]
if self.sign:
    uploadflags.append('--sign')
elif server.sign is not None:
    if server.sign:
        uploadflags.append('--sign')
elif self.defaults.sign:
    uploadflags.append('--sign')
if self.identity:
    # an explicit identity implies signing
    if '--sign' not in uploadflags:
        uploadflags.append('--sign')
    uploadflags.append('--identity="%s"' % self.identity)
elif '--sign' in uploadflags:
    # signing without explicit identity: fall back to configured ones
    if server.identity is not None:
        if server.identity:
            uploadflags.append('--identity="%s"' % server.identity)
    elif self.defaults.identity:
        uploadflags.append('--identity="%s"' % self.defaults.identity)
return uploadflags
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_options(self):
"""Process the command line. """ |
args = self.parse_options(self.args)
if args:
    # first positional argument is the sandbox directory or URL
    self.directory = args[0]
if self.develop:
    # development releases are never tagged
    self.skiptag = True
if not self.develop:
    self.develop = self.defaults.develop
if not self.develop:
    self.infoflags = self.setuptools.infoflags
if not self.formats:
    self.formats = self.defaults.formats
# translate format names into setup.py distribution commands
for format in self.formats:
    if format == 'zip':
        self.distributions.append(('sdist', ['--formats="zip"']))
    elif format == 'gztar':
        self.distributions.append(('sdist', ['--formats="gztar"']))
    elif format == 'egg':
        self.distributions.append(('bdist', ['--formats="egg"']))
    elif format == 'wheel':
        self.distributions.append(('bdist_wheel', []))
if not self.distributions:
    # default distribution: a zip sdist
    self.distributions.append(('sdist', ['--formats="zip"']))
if self.list:
    self.list_locations()
if not self.locations:
    self.locations.extend(self.locations.get_default_location())
# locations are only required when something will actually be
# registered or uploaded
if not (self.skipregister and self.skipupload):
    if not (self.get_skipregister() and self.get_skipupload()):
        self.locations.check_empty_locations()
self.locations.check_valid_locations()
if len(args) > 1:
    # a second positional argument (branch) is only valid for URL targets
    if self.urlparser.is_url(self.directory):
        self.branch = args[1]
    elif self.urlparser.is_ssh_url(self.directory):
        self.branch = args[1]
    else:
        err_exit('mkrelease: invalid arguments\n%s' % USAGE)
if len(args) > 2:
    err_exit('mkrelease: too many arguments\n%s' % USAGE)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_package(self):
    """Get the URL or sandbox to release.

    Resolves ``self.directory`` into either a remote SCM URL (which
    forces push mode) or a local sandbox; a local sandbox is validated
    as a releasable package and committed if dirty.
    """
    directory = self.directory
    develop = self.develop
    scmtype = self.scmtype
    self.scm = self.scms.get_scm(scmtype, directory)
    if self.scm.is_valid_url(directory):
        # Remote release: operate on the URL and push changes upstream
        directory = self.urlparser.abspath(directory)
        self.remoteurl = directory
        self.isremote = self.push = True
    else:
        directory = abspath(expanduser(directory))
        self.isremote = False
        self.scm.check_valid_sandbox(directory)
        self.setuptools.check_valid_package(directory)
        name, version = self.setuptools.get_package_info(directory, develop)
        print('Releasing', name, version)
        if not self.skipcommit:
            # Commit any uncommitted local changes before releasing
            if self.scm.is_dirty_sandbox(directory):
                self.scm.commit_sandbox(directory, name, version, self.push)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_release(self):
    """Build and distribute the package.

    Works in a temporary directory (remote sources are cloned there
    first), optionally switches branch, tags the release, builds the
    configured distributions, and registers/uploads each one to every
    configured location (package index server or scp/sftp destination).
    """
    directory = self.directory
    infoflags = self.infoflags
    branch = self.branch
    develop = self.develop
    scmtype = self.scm.name
    tempdir = abspath(tempfile.mkdtemp(prefix='mkrelease-'))
    try:
        if self.isremote:
            # Remote release: check out a pristine copy to build from
            directory = join(tempdir, 'build')
            self.scm.clone_url(self.remoteurl, directory)
        else:
            directory = abspath(expanduser(directory))
        self.scm.check_valid_sandbox(directory)
        if self.isremote:
            branch = self.scm.make_branchid(directory, branch)
            if branch:
                self.scm.switch_branch(directory, branch)
            if scmtype != 'svn':
                # DVCS: report the branch actually checked out
                branch = self.scm.get_branch_from_sandbox(directory)
                print('Releasing branch', branch)
        self.setuptools.check_valid_package(directory)
        if not (self.skipcommit and self.skiptag):
            self.scm.check_dirty_sandbox(directory)
            self.scm.check_unclean_sandbox(directory)
        name, version = self.setuptools.get_package_info(directory, develop)
        if self.isremote:
            # Local releases already printed this in get_package
            print('Releasing', name, version)
        if not self.skiptag:
            print('Tagging', name, version)
            tagid = self.scm.make_tagid(directory, version)
            self.scm.check_tag_exists(directory, tagid)
            self.scm.create_tag(directory, tagid, name, version, self.push)
        if self.manifest:
            # Force the MANIFEST.in code path instead of SCM file lists
            scmtype = 'none'
        for distcmd, distflags in self.distributions:
            manifest = self.setuptools.run_egg_info(
                directory, infoflags, scmtype, self.quiet)
            distfile = self.setuptools.run_dist(
                directory, infoflags, distcmd, distflags, scmtype, self.quiet)
            for location in self.locations:
                if self.locations.is_server(location):
                    # Package index server: register and/or upload
                    if not self.get_skipregister(location):
                        self.setuptools.run_register(
                            directory, infoflags, location, scmtype, self.quiet)
                    if not self.get_skipupload():
                        uploadflags = self.get_uploadflags(location)
                        if '--sign' in uploadflags and isfile(distfile+'.asc'):
                            # Remove a stale signature so gpg re-signs
                            os.remove(distfile+'.asc')
                        self.setuptools.run_upload(
                            directory, infoflags, distcmd, distflags, location, uploadflags,
                            scmtype, self.quiet)
                else:
                    # Plain scp/sftp destination
                    if not self.skipupload:
                        if self.locations.is_ssh_url(location):
                            scheme, location = self.urlparser.to_ssh_url(location)
                            self.scp.run_upload(scheme, distfile, location)
                        else:
                            self.scp.run_upload('scp', distfile, location)
    finally:
        # Always clean up the temporary build area
        shutil.rmtree(tempdir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure_gateway(
        cls, launch_jvm: bool = True,
        gateway: Union[GatewayParameters, Dict[str, Any]] = None,
        callback_server: Union[CallbackServerParameters, Dict[str, Any]] = False,
        javaopts: Iterable[str] = (),
        classpath: Iterable[str] = ''):
    """Configure a Py4J gateway.

    :param launch_jvm: ``True`` to spawn a Java Virtual Machine in a
        subprocess and connect to it, ``False`` to connect to an existing
        Py4J enabled JVM
    :param gateway: either a :class:`~py4j.java_gateway.GatewayParameters`
        object or a dictionary of keyword arguments for it
    :param callback_server: callback server parameters or a boolean
        indicating if a callback server is wanted
    :param javaopts: options passed to Java itself
    :param classpath: path or iterable of paths to pass to the JVM launcher
        as the class path
    """
    assert check_argument_types()
    if not isinstance(classpath, str):
        classpath = os.pathsep.join(classpath)
    javaopts = list(javaopts)
    # Substitute package names in the class path with the absolute
    # directory paths of the corresponding importable packages
    for m in package_re.finditer(classpath):
        pkg_module = import_module(m.group(1))
        pkg_dir = os.path.dirname(pkg_module.__file__)
        classpath = classpath.replace(m.group(0), pkg_dir)
    gateway = {} if gateway is None else gateway
    if isinstance(gateway, dict):
        # Fill in sensible defaults before building the parameters object
        gateway.setdefault('eager_load', True)
        gateway.setdefault('auto_convert', True)
        gateway = GatewayParameters(**gateway)
    if isinstance(callback_server, dict):
        callback_server = CallbackServerParameters(**callback_server)
    elif callback_server is True:
        callback_server = CallbackServerParameters()
    return launch_jvm, gateway, callback_server, classpath, javaopts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, filename, offset):
    """Load NTFS volume information.

    Args:
        filename (str): path to the file/device to read the volume
            information from.
        offset (uint): valid NTFS partition offset from the beginning
            of the file/device.

    Raises:
        IOError: if the source file/device does not exist or is not
            readable.
    """
    self.offset = offset
    self.filename = filename
    # The boot sector holds the volume geometry (e.g. MFT record size)
    self.bootsector = BootSector(
        filename=filename,
        length=NTFS_BOOTSECTOR_SIZE,
        offset=self.offset)
    # NOTE(review): self.mft_table_offset is not set here — presumably a
    # property derived from the boot sector; confirm in the class body
    self.mft_table = MftTable(
        mft_entry_size=self.bootsector.mft_record_size,
        filename=self.filename,
        offset=self.mft_table_offset
    )
    # Preload the first NUM_SYSTEM_ENTRIES (system) MFT entries
    self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)
    self._load_volume_information()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mft_zone_size(self, num_clusters, mft_zone_multiplier=1):
"""Returns mft zone size in clusters. From ntfs_progs.1.22.""" |
sizes = {
4: num_clusters >> 1, # 50%
3: (num_clusters * 3) >> 3, # 37,5%
2: num_clusters >> 2, # 25%
}
return sizes.get(mft_zone_multiplier, num_clusters >> 3) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close every underlying iterator that supports closing.

    This is particularly important when the iterators are files.
    """
    if not hasattr(self, 'iterators'):
        return
    for it in self.iterators:
        if hasattr(it, 'close'):
            it.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_sorting(self):
    """Advance the just-consumed iterators and pick the next key group.

    Pulls the next element from every iterator listed in ``self.idxs``,
    files each new element under its sort key in ``self.sorted_tops``,
    then pops the smallest key to become the new current group of
    indices (``self.idxs``), resetting ``self.c_idx``.

    Raises:
        StopIteration: when every iterator is exhausted.
    """
    key = self.key
    sorted_tops = self.sorted_tops
    tops = self.tops
    iterators = self.iterators
    for idx in self.idxs:
        try:
            tops[idx] = next(iterators[idx])
            top_key = key(tops[idx])
            if top_key not in sorted_tops:
                sorted_tops[top_key] = []
            sorted_tops[top_key].append(idx)
        except StopIteration:
            pass  # this iterator is exhausted; it simply drops out
    if len(sorted_tops) == 0:
        raise StopIteration
    # popitem(last=False) takes the entry with the smallest key —
    # NOTE(review): this assumes sorted_tops is a SortedDict-style
    # mapping kept in key order; confirm the container type.
    # (The local name `key` is rebound here from the key function to
    # the popped key value.)
    key, self.idxs = sorted_tops.popitem(last=False)
    self.c_idx = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def domain_user_stats():
    # type: () -> pd.Series
    """Get the number of distinct email addresses per observed domain.

    Reads the precomputed ``email_domain_users.csv`` shipped next to
    this module (index: email domain, value: number of distinct commit
    author usernames seen for that domain) and returns it as a Series
    with null index entries dropped.

    The CSV is rebuilt offline by scraping commits for every package in
    each ecosystem and counting unique cleaned ``user@domain`` authors
    per domain (see repository history for the build script).

    Returns:
        pd.Series: distinct-user counts indexed by email domain.
    """
    fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
    # read_csv(squeeze=True) was deprecated in pandas 1.4 and removed in
    # 2.0; squeeze the single-column frame explicitly instead (this also
    # works on pandas 1.x).
    stats = pd.read_csv(fname, header=0, index_col=0).squeeze("columns")
    return stats[pd.notnull(stats.index)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_university(addr):
    # type: (Union[str, unicode]) -> bool
    """Check if the provided email has a university domain.

    A domain counts as a university domain when it is:
    - in the .edu TLD (except public services like england.edu or
      australia.edu),
    - in .edu.TLD (non-US based institutions, like edu.au), or
    - listed in a public list of universities.

    Since universities often have departmental addresses as well, only
    the tail of the domain is matched; e.g. cs.cmu.edu matches cmu.edu.

    :param addr: email address
    :return: bool True/False
    """
    addr_domain = domain(addr)
    if not addr_domain:
        # invalid email
        return False
    chunks = addr_domain.split(".")
    if len(chunks) < 2:
        # local or invalid address
        return False
    domains = university_domains()
    # Strip leading subdomains until a match is found or nothing is
    # left: isri.cs.cmu.edu -> cs.cmu.edu -> cmu.edu
    in_edu_tld = chunks[-1] == "edu" and chunks[-2] not in ("england", "australia")
    return (in_edu_tld
            or chunks[-2] == "edu"
            or any(".".join(chunks[i:]) in domains for i in range(len(chunks) - 1)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_public(addr):
    # type: (Union[str, unicode]) -> bool
    """Check if the passed email is registered at a free public mail server.

    :param addr: email address to check
    :return: bool (True if public or invalid, False otherwise)
    """
    addr_domain = domain(addr)
    if not addr_domain:
        # anybody can use an invalid email, so treat it as public
        return True
    if "." not in addr_domain:
        # no TLD separator: local or malformed domain
        return True
    return addr_domain.endswith("local") or addr_domain in public_domains()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_color(self, text, attr=None):
    '''write text at current cursor position and interpret color escapes.
    return the number of characters written.
    '''
    log(u'write_color("%s", %s)' % (text, attr))
    chunks = self.terminal_escape.split(text)
    log(u'chunks=%s' % repr(chunks))
    bg = self.savebg
    n = 0 # count the characters we actually write, omitting the escapes
    if attr is None:#use attribute from initial console
        attr = self.attr
    try:
        # split a packed console attribute word into fore/back colors
        fg = self.trtable[(0x000f&attr)]
        bg = self.trtable[(0x00f0&attr)>>4]
    except TypeError:
        # attr is not an int — use it directly as the foreground color
        fg = attr
    for chunk in chunks:
        m = self.escape_parts.match(chunk)
        if m:
            # escape sequence: update the current attribute
            log(m.group(1))
            attr = ansicolor.get(m.group(1), self.attr)
        # NOTE(review): every chunk (escapes included) is counted and
        # written here, and fg/bg are never recomputed from the updated
        # attr inside the loop — this looks inconsistent with the
        # docstring's "omitting the escapes"; confirm intended behavior.
        n += len(chunk)
        System.Console.ForegroundColor = fg
        System.Console.BackgroundColor = bg
        System.Console.Write(chunk)
    return n
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def files(self):
    """List of hologram data file names in the input zip file"""
    cached = self._files
    if cached is None:
        # index the archive only once, then reuse the cached list
        cached = SeriesZipTifHolo._index_files(self.path)
        self._files = cached
    return cached
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_time(self, idx):
    """Time for each TIFF file

    If there are no metadata keyword arguments defined for the TIFF
    file format, then the zip file `date_time` value is used.
    """
    # first try the TIFF file itself (possible metadata keywords)
    thetime = self._get_dataset(idx).get_time()
    if not np.isnan(thetime):
        return thetime
    # fall back to the zip member's recorded date_time
    arc = zipfile.ZipFile(self.path)
    date_time = arc.getinfo(self.files[idx]).date_time
    # pad (Y, M, D, h, m, s) with weekday/yearday/dst fields for mktime
    return time.mktime(tuple(date_time) + (0, 0, 0))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_remote_data(self, localvars, remotevars, inds, shape):
    """Update the local netCDF cache with remote data.

    Args:
        localvars: local (cache) netCDF variables to fill.
        remotevars: matching remote netCDF variables to read from.
        inds: time indices to copy; the first and last define the range.
        shape: shape of the remote variable — 4D (t, z, y, x) or
            3D (t, y, x).
    """
    # If user specifies 'all' then entire xy domain is
    # grabbed, default is 4, specified in the model_controller
    if self.horiz_size == 'all':
        y, y_1 = 0, shape[-2]
        x, x_1 = 0, shape[-1]
    else:
        # window of +/- horiz_size cells around the requested point
        r = self.horiz_size
        x, x_1 = self.point_get.value[2]-r, self.point_get.value[2]+r+1
        y, y_1 = self.point_get.value[1]-r, self.point_get.value[1]+r+1
        # point_get values appear to be 1-element arrays — unwrap them
        # (TODO(review): confirm the point_get value layout)
        x, x_1 = x[0], x_1[0]
        y, y_1 = y[0], y_1[0]
    # clamp the window to the grid bounds
    if y < 0:
        y = 0
    if x < 0:
        x = 0
    if y_1 > shape[-2]:
        y_1 = shape[-2]
    if x_1 > shape[-1]:
        x_1 = shape[-1]
    # Update domain variable for where we will add data (mark the
    # region as present in the cache bookkeeping variable)
    domain = self.local.variables['domain']
    if len(shape) == 4:
        domain[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1] = np.ones((inds[-1]+1-inds[0], shape[1], y_1-y, x_1-x))
    elif len(shape) == 3:
        domain[inds[0]:inds[-1]+1, y:y_1, x:x_1] = np.ones((inds[-1]+1-inds[0], y_1-y, x_1-x))
    # Update the local variables with remote data
    logger.debug("Filling cache with: Time - %s:%s, Lat - %s:%s, Lon - %s:%s" % (str(inds[0]), str(inds[-1]+1), str(y), str(y_1), str(x), str(x_1)))
    for local, remote in zip(localvars, remotevars):
        if len(shape) == 4:
            local[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1] = remote[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1]
        else:
            local[inds[0]:inds[-1]+1, y:y_1, x:x_1] = remote[inds[0]:inds[-1]+1, y:y_1, x:x_1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def need_data(self, i):
    """Test whether the cache lacks the data that the particle needs.

    Returns True when the raw data source must be consulted: the cache
    lookup at time index ``i`` hit fill values, or the time index does
    not exist in the cache yet. Returns False when caching is disabled
    (the caller always reads the raw source then) or the cache has the
    data.
    """
    # If we are not caching, we always grab data from the raw source
    if self.caching is False:
        return False
    logger.debug("Checking cache for data availability at %s." % self.part.location.logstring())
    try:
        # Tell the DataController that we are going to be reading from the file
        with self.read_lock:
            self.read_count.value += 1
            self.has_read_lock.append(os.getpid())
        self.dataset.opennc()
        # Test if the cache has the data we need
        # If the point we request contains fill values,
        # we need data
        cached_lookup = self.dataset.get_values('domain', timeinds=[np.asarray([i])], point=self.part.location)
        logger.debug("Type of result: %s" % type(cached_lookup))
        logger.debug("Double mean of result: %s" % np.mean(np.mean(cached_lookup)))
        logger.debug("Type of Double mean of result: %s" % type(np.mean(np.mean(cached_lookup))))
        # A masked mean means the point fell on fill values -> cache miss
        if type(np.mean(np.mean(cached_lookup))) == np.ma.core.MaskedConstant:
            need = True
            logger.debug("I NEED data. Got back: %s" % cached_lookup)
        else:
            need = False
            logger.debug("I DO NOT NEED data")
    except StandardError:
        # If the time index doesn't even exist in the cache, we need data.
        # NOTE(review): StandardError exists only in Python 2.
        need = True
        logger.debug("I NEED data (no time index exists in cache)")
    finally:
        self.dataset.closenc()
        with self.read_lock:
            self.read_count.value -= 1
            self.has_read_lock.remove(os.getpid())
    return need
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def linterp(self, setx, sety, x):
    """Linearly interpolate model data values between time steps.

    Args:
        setx: two abscissae (e.g. the bracketing time steps).
        sety: the model values at those abscissae.
        x: the abscissa to interpolate at.

    Returns:
        The interpolated value, or ``np.nan`` when the first sample of
        either input is NaN.
    """
    if math.isnan(sety[0]) or math.isnan(setx[0]):
        return np.nan
    slope = (sety[1] - sety[0]) / (setx[1] - setx[0])
    return sety[0] + (x - setx[0]) * slope
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.