text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def pip_search(self, search_string=None):
    """Search for pip packages in PyPI matching `search_string`."""
    # Delegate to the generic pip runner; results are parsed by _pip_search.
    return self._call_pip(name='root',
                          extra_args=['search', search_string],
                          callback=self._pip_search)
def _pip_search(stdout, stderr):
    """Callback for pip search: parse `name - description` lines into a dict."""
    packages = {}
    # Drop empty lines up front instead of repeatedly calling list.remove.
    useful_lines = [line for line in to_text_string(stdout).split('\n') if line]
    for line in useful_lines:
        if ' - ' not in line:
            continue
        pieces = line.split(' - ')
        packages[pieces[0].strip()] = pieces[1].strip()
    return packages
def _timer_update(self):
    """Animate the dependency-resolution text with moving dots."""
    self._timer_counter += 1
    # Rotate the dot patterns: take the head and push it to the back.
    head = self._timer_dots.pop(0)
    self._timer_dots = self._timer_dots + [head]
    self._rows = [[_(u'Resolving dependencies') + head, u'', u'', u'']]
    # Refresh the single displayed row.
    top_left = self.createIndex(0, 0)
    self.dataChanged.emit(top_left, top_left)
    # Stop animating after a fixed number of ticks.
    if self._timer_counter > 150:
        self._timer.stop()
        self._timer_counter = 0
def _create_worker(self, method, *args, **kwargs):
    """Create a worker for this client to be run in a separate thread."""
    # FIXME: this might be heavy...
    worker_thread = QThread()
    client_worker = ClientWorker(method, args, kwargs)
    client_worker.moveToThread(worker_thread)
    # When the worker finishes, advance the queue and stop its thread.
    client_worker.sig_finished.connect(self._start)
    client_worker.sig_finished.connect(worker_thread.quit)
    worker_thread.started.connect(client_worker.start)
    self._queue.append(worker_thread)
    self._threads.append(worker_thread)
    self._workers.append(client_worker)
    self._start()
    return client_worker
def _load_repodata(filepaths, extra_data=None, metadata=None):
    """Load all the available packages information.

    For downloaded repodata files (repo.continuum.io), additional data
    provided (anaconda cloud), and additional metadata and merge into a
    single set of packages and apps.
    """
    extra_data = extra_data if extra_data else {}
    metadata = metadata if metadata else {}

    # --- Read and decode every repodata file that exists on disk.
    repodata = []
    for filepath in filepaths:
        compressed = filepath.endswith('.bz2')
        mode = 'rb' if compressed else 'r'
        if os.path.isfile(filepath):
            with open(filepath, mode) as f:
                raw_data = f.read()
            data = bz2.decompress(raw_data) if compressed else raw_data
            try:
                data = json.loads(to_text_string(data, 'UTF-8'))
            except Exception as error:
                logger.error(str(error))
                data = {}
            repodata.append(data)

    # --- Merge all package entries, keyed by package name.
    all_packages = {}
    for data in repodata:
        packages = data.get('packages', {})
        # NOTE: use a distinct name for the per-package dict so the outer
        # `data` loop variable is no longer shadowed.
        for canonical_name, pkg_data in packages.items():
            # Canonical name is '<name>-<version>-<build>'.
            name, version, _build = tuple(canonical_name.rsplit('-', 2))
            if name not in all_packages:
                all_packages[name] = {'versions': set(),
                                      'size': {},
                                      'type': {},
                                      'app_entry': {},
                                      'app_type': {},
                                      }
            # BUGFIX: metadata used to be merged only in an `elif`, i.e. when
            # the package had already been seen once, so packages with a
            # single build never received home/license/summary information.
            if name in metadata:
                temp_data = all_packages[name]
                temp_data['home'] = metadata[name].get('home', '')
                temp_data['license'] = metadata[name].get('license', '')
                temp_data['summary'] = metadata[name].get('summary', '')
                temp_data['latest_version'] = metadata[name].get('version')
                all_packages[name] = temp_data
            all_packages[name]['versions'].add(version)
            all_packages[name]['size'][version] = pkg_data.get('size', '')
            # Only the latest builds will have the correct metadata for
            # apps, so only store apps that have the app metadata
            if pkg_data.get('type'):
                all_packages[name]['type'][version] = pkg_data.get('type')
                all_packages[name]['app_entry'][version] = pkg_data.get(
                    'app_entry')
                all_packages[name]['app_type'][version] = pkg_data.get(
                    'app_type')

    # --- Sort versions and extract the subset of packages that are apps.
    all_apps = {}
    for name in all_packages:
        versions = sort_versions(list(all_packages[name]['versions']))
        all_packages[name]['versions'] = versions[:]
        # Having a 'type' entry implies the package is an app; the check was
        # previously repeated once per version with identical effect.
        if all_packages[name].get('type'):
            all_apps[name] = all_packages[name].copy()
            # Remove all versions that are not apps!
            types = all_apps[name]['type']
            all_apps[name]['versions'] = [
                v for v in all_apps[name]['versions'][:] if v in types]

    return all_packages, all_apps
def login(self, username, password, application, application_url):
    """Login to anaconda cloud."""
    # Do not log the password.
    logger.debug(str((username, application, application_url)))
    return self._create_worker(self._anaconda_client_api.authenticate,
                               username, password, application,
                               application_url)
def logout(self):
    """Logout from anaconda cloud."""
    logger.debug('Logout')
    return self._create_worker(self._anaconda_client_api.remove_authentication)
def load_repodata(self, filepaths, extra_data=None, metadata=None):
    """
    Load all the available packages information for downloaded repodata.

    Files include repo.continuum.io, additional data provided (anaconda
    cloud), and additional metadata and merge into a single set of packages
    and apps.
    """
    logger.debug(str((filepaths)))
    return self._create_worker(self._load_repodata, filepaths,
                               extra_data=extra_data, metadata=metadata)
def prepare_model_data(self, packages, linked, pip=None,
                       private_packages=None):
    """Prepare downloaded package info along with pip packages info."""
    logger.debug('')
    return self._prepare_model_data(packages, linked, pip=pip,
                                    private_packages=private_packages)
def set_domain(self, domain='https://api.anaconda.org'):
    """Reset current api domain."""
    logger.debug(str((domain)))
    # Persist the new domain in the binstar configuration, then rebuild the
    # server api object so it points at the new url.
    config = binstar_client.utils.get_config()
    config['url'] = domain
    binstar_client.utils.set_config(config)
    self._anaconda_client_api = binstar_client.utils.get_server_api(
        token=None, log_level=logging.NOTSET)
    return self.user()
def packages(self, login=None, platform=None, package_type=None,
             type_=None, access=None):
    """Return all the available packages for a given user.

    Parameters
    ----------
    type_ : Optional[str]
        Only find packages that have this conda `type`, (i.e. 'app').
    access : Optional[str]
        Only find packages that have this access level (e.g. 'private',
        'authenticated', 'public').
    """
    logger.debug('')
    return self._create_worker(self._anaconda_client_api.user_packages,
                               login=login, platform=platform,
                               package_type=package_type,
                               type_=type_, access=access)
def country(from_key='name', to_key='iso'):
    """Creates and returns a mapper function to access country data.

    The mapper function that is returned must be called with one argument.
    In the default case you call it with a name and it returns a 3-letter
    ISO_3166-1 code, e.g. called with ``Spain`` it would return ``ESP``.

    :param from_key: (optional) the country attribute you give as input.
        Defaults to ``name``.
    :param to_key: (optional) the country attribute you want as output.
        Defaults to ``iso``.
    :return: mapper
    :rtype: function
    """
    cache = GeonamesCache()
    dataset = cache.get_dataset_by_key(cache.get_countries(), from_key)

    def mapper(value):
        # For country name inputs take the names mapping into account.
        if from_key == 'name':
            value = mappings.country_names.get(value, value)
        # If there is a record return the demanded attribute.
        record = dataset.get(value)
        if record:
            return record[to_key]

    return mapper
def get_cities(self):
    """Get a dictionary of cities keyed by geonameid."""
    # Lazily load and cache the dataset on first access (self.cities is
    # None before the first call, so passing it through is passing None).
    if self.cities is None:
        self.cities = self._load_data(self.cities, 'cities.json')
    return self.cities
def get_cities_by_name(self, name):
    """Get a list of city dictionaries with the given name.

    City names cannot be used as keys, as they are not unique.
    """
    # Compute and memoize the answer per name.
    if name not in self.cities_by_names:
        if self.cities_items is None:
            self.cities_items = list(self.get_cities().items())
        matches = [{gid: city}
                   for gid, city in self.cities_items
                   if city['name'] == name]
        self.cities_by_names[name] = matches
    return self.cities_by_names[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_repo_urls_from_channels(self, channels): """ Convert a channel into a normalized repo name including. Channels are assumed in normalized url form. """
repos = [] sys_platform = self._conda_api.get_platform() for channel in channels: url = '{0}/{1}/repodata.json.bz2'.format(channel, sys_platform) repos.append(url) return repos
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_repos(self, repos): """Check if repodata urls are valid."""
self._checking_repos = [] self._valid_repos = [] for repo in repos: worker = self.download_is_valid_url(repo) worker.sig_finished.connect(self._repos_checked) worker.repo = repo self._checking_repos.append(repo)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _repos_checked(self, worker, output, error): """Callback for _check_repos."""
if worker.repo in self._checking_repos: self._checking_repos.remove(worker.repo) if output: self._valid_repos.append(worker.repo) if len(self._checking_repos) == 0: self._download_repodata(self._valid_repos)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _repo_url_to_path(self, repo): """Convert a `repo` url to a file path for local storage."""
repo = repo.replace('http://', '') repo = repo.replace('https://', '') repo = repo.replace('/', '_') return os.sep.join([self._data_directory, repo])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _download_repodata(self, checked_repos): """Dowload repodata."""
self._files_downloaded = [] self._repodata_files = [] self.__counter = -1 if checked_repos: for repo in checked_repos: path = self._repo_url_to_path(repo) self._files_downloaded.append(path) self._repodata_files.append(path) worker = self.download_async(repo, path) worker.url = repo worker.path = path worker.sig_finished.connect(self._repodata_downloaded) else: # Empty, maybe there is no internet connection # Load information from conda-meta and save that file path = self._get_repodata_from_meta() self._repodata_files = [path] self._repodata_downloaded()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_repodata_from_meta(self): """Generate repodata from local meta files."""
path = os.sep.join([self.ROOT_PREFIX, 'conda-meta']) packages = os.listdir(path) meta_repodata = {} for pkg in packages: if pkg.endswith('.json'): filepath = os.sep.join([path, pkg]) with open(filepath, 'r') as f: data = json.load(f) if 'files' in data: data.pop('files') if 'icondata' in data: data.pop('icondata') name = pkg.replace('.json', '') meta_repodata[name] = data meta_repodata_path = os.sep.join([self._data_directory, 'offline.json']) repodata = {'info': [], 'packages': meta_repodata} with open(meta_repodata_path, 'w') as f: json.dump(repodata, f, sort_keys=True, indent=4, separators=(',', ': ')) return meta_repodata_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _repodata_downloaded(self, worker=None, output=None, error=None): """Callback for _download_repodata."""
if worker: self._files_downloaded.remove(worker.path) if worker.path in self._files_downloaded: self._files_downloaded.remove(worker.path) if len(self._files_downloaded) == 0: self.sig_repodata_updated.emit(list(set(self._repodata_files)))
def repodata_files(self, channels=None):
    """
    Return the repodata paths based on `channels` and the `data_directory`.

    There is no check for validity here.
    """
    if channels is None:
        channels = self.conda_get_condarc_channels()
    # os.sep.join([x]) is the identity on a single element, so urls map
    # straight to their local storage paths.
    return [self._repo_url_to_path(url)
            for url in self._set_repo_urls_from_channels(channels)]
def update_repodata(self, channels=None):
    """Update repodata from channels or use condarc channels if None."""
    normalized = self.conda_get_condarc_channels(channels=channels,
                                                 normalize=True)
    self._check_repos(self._set_repo_urls_from_channels(normalized))
def update_metadata(self):
    """
    Update the metadata available for packages in repo.continuum.io.

    Returns a download worker.

    Raises if `set_data_directory` has not been called yet.
    """
    if self._data_directory is None:
        raise Exception('Need to call `api.set_data_directory` first.')
    filepath = os.sep.join([self._data_directory, 'metadata.json'])
    return self.download_requests(
        'https://repo.continuum.io/pkgs/metadata.json', filepath)
def check_valid_channel(self, channel,
                        conda_url='https://conda.anaconda.org'):
    """Check if channel is valid."""
    # Full urls are used as-is; bare channel names hang off conda_url.
    if channel.startswith(('https://', 'http://')):
        url = channel
    else:
        url = "{0}/{1}".format(conda_url, channel)
    # Drop a single trailing slash, if any.
    if url.endswith('/'):
        url = url[:-1]
    repodata_url = "{0}/{1}/{2}".format(url, self.conda_platform(),
                                        'repodata.json')
    worker = self.download_is_valid_url(repodata_url)
    worker.url = url
    return worker
def _aws_get_instance_by_tag(region, name, tag, raw):
    """Get all instances matching a tag.

    Queries EC2 in `region` with a tag filter and wraps each instance dict
    via `_aws_instance_from_dict`.
    """
    client = boto3.session.Session().client('ec2', region)
    matching_reservations = client.describe_instances(
        Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])
    # Idiom fix: plain loops instead of a side-effecting nested list
    # comprehension (previously silenced with a pylint disable).
    instances = []
    for reservation in matching_reservations:
        if not reservation:
            continue
        for instance in reservation.get('Instances'):
            instances.append(_aws_instance_from_dict(region, instance, raw))
    return instances
def aws_get_instances_by_id(region, instance_id, raw=True):
    """Returns instances matching an id.

    Returns an empty list when the id does not exist; re-raises any other
    EC2 client error.
    """
    client = boto3.session.Session().client('ec2', region)
    try:
        matching_reservations = client.describe_instances(
            InstanceIds=[instance_id]).get('Reservations', [])
    except ClientError as exc:
        if exc.response.get('Error', {}).get('Code') != \
                'InvalidInstanceID.NotFound':
            raise
        return []
    # Idiom fix: plain loops instead of a side-effecting nested list
    # comprehension (previously silenced with a pylint disable).
    instances = []
    for reservation in matching_reservations:
        if not reservation:
            continue
        for instance in reservation.get('Instances'):
            instances.append(_aws_instance_from_dict(region, instance, raw))
    return instances
def get_instances_by_name(name, sort_by_order=('cloud', 'name'),
                          projects=None, raw=True, regions=None,
                          gcp_credentials=None, clouds=SUPPORTED_CLOUDS):
    """Get instances from GCP and AWS by name."""
    matches = all_clouds_get_instances_by_name(
        name, projects, raw, credentials=gcp_credentials, clouds=clouds)
    # Optionally narrow down to the requested regions.
    if regions:
        matches = [inst for inst in matches if inst.region in regions]
    matches.sort(key=lambda inst: [getattr(inst, field)
                                   for field in sort_by_order])
    return matches
def get_os_version(instance):
    """Get OS Version for instances."""
    if instance.cloud == 'aws':
        client = boto3.client('ec2', instance.region)
        image_id = client.describe_instances(
            InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']
        image_name = client.describe_images(
            ImageIds=[image_id])['Images'][0]['Name']
        return '16.04' if '16.04' in image_name else '14.04'
    if instance.cloud == 'gcp':
        credentials = GoogleCredentials.get_application_default()
        compute = discovery.build('compute', 'v1', credentials=credentials)
        disks = compute.instances().get(instance=instance.name,
                                        zone=instance.zone,
                                        project=instance.project).execute()['disks']
        # The Ubuntu release is encoded in the boot disk's license strings.
        for disk in disks:
            if not disk.get('boot'):
                continue
            for license_string in disk.get('licenses', []):
                if '1604' in license_string:
                    return '16.04'
                if '1404' in license_string:
                    return '14.04'
        return '14.04'
    return '14.04'
def get_volumes(instance):
    """Returns all the volumes of an instance.

    AWS: maps device name -> {'size', 'volume_type'}.
    GCP: maps disk index -> disk details including size and disk type.
    Raises ValueError for an unknown cloud.
    """
    if instance.cloud == 'aws':
        client = boto3.client('ec2', instance.region)
        devices = client.describe_instance_attribute(
            InstanceId=instance.id,
            Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])
        volumes = client.describe_volumes(VolumeIds=[
            device['Ebs']['VolumeId'] for device in devices
            if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])
        return {volume['Attachments'][0]['Device']:
                {'size': volume['Size'], 'volume_type': volume['VolumeType']}
                for volume in volumes}
    if instance.cloud == 'gcp':
        credentials = GoogleCredentials.get_application_default()
        compute = discovery.build('compute', 'v1', credentials=credentials)
        volumes = {}
        for disk in compute.instances().get(
                instance=instance.id, zone=instance.zone,
                project=instance.project).execute()['disks']:
            index = disk['index']
            name = (disk['deviceName']
                    if disk['deviceName'] not in [u'persistent-disk-0', 'boot']
                    else instance.id)
            # BUGFIX: the 'local-ssd' check was duplicated; a single branch
            # is enough. Local SSDs have a fixed 375 GB size.
            if 'local-ssd' in disk['deviceName']:
                size = 375.0
                disk_type = 'local-ssd'
            else:
                disk_data = compute.disks().get(
                    disk=name, zone=instance.zone,
                    project=instance.project).execute()
                size = float(disk_data['sizeGb'])
                disk_type = 'pd-ssd'
            volumes[index] = {'size': size,
                              'type': disk['type'],
                              'deviceName': disk['deviceName'],
                              'interface': disk['interface'],
                              'diskType': disk_type}
        return volumes
    raise ValueError('Unknown cloud %s' % instance.cloud)
def get_persistent_address(instance):
    """Returns the public ip address of an instance."""
    if instance.cloud == 'aws':
        client = boto3.client('ec2', instance.region)
        try:
            client.describe_addresses(PublicIps=[instance.ip_address])
        except botocore.client.ClientError as exc:
            if exc.response.get('Error', {}).get('Code') != \
                    'InvalidAddress.NotFound':
                raise
            # Address is not public
            return None
        return instance.ip_address
    if instance.cloud == 'gcp':
        credentials = GoogleCredentials.get_application_default()
        compute = discovery.build('compute', 'v1', credentials=credentials)
        try:
            return compute.addresses().get(
                address=instance.name,
                project=instance.project,
                region=instance.region).execute()['address']
        except errors.HttpError as exc:
            if 'was not found' in str(exc):
                return None
            raise
    raise ValueError('Unknown cloud %s' % instance.cloud)
def main():
    """Use pip to find pip installed packages in a given prefix."""
    # Key format is '<name>-<version>-pip' to tag entries as pip-installed.
    pip_packages = {
        "{0}-{1}-pip".format(dist.project_name.lower(), dist.version):
            {'version': dist.version}
        for dist in pip.get_installed_distributions()
    }
    print(json.dumps(pip_packages))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _save(file, data, mode='w+'): """ Write all data to created file. Also overwrite previous file. """
with open(file, mode) as fh: fh.write(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge(obj): """ Merge contents. It does a simply merge of all files defined under 'static' key. function will render them and append to the merged output. To use the render option you have to define both 'config' and 'path' on merger dictionary. """
merge = '' for f in obj.get('static', []): print 'Merging: {}'. format(f) merge += _read(f) def doless(f): print 'Compiling LESS: {}'.format(f) ret, tmp = commands.getstatusoutput('lesscpy '+f) if ret == 0: return tmp else: print 'LESS to CSS failed for: {} (Do you have lesscpy installed?)'.format(f) return '' if merger.get('config'): #only imports django if we have a config file defined import re for p in merger['path']: sys.path.append(p) os.environ.setdefault("DJANGO_SETTINGS_MODULE", merger['config']) try: from django.template.loader import get_template_from_string from django.template.base import Context from django.utils.encoding import smart_str from django.conf import settings except: print 'Do you really have django well installed?' sys.exit(1) for f in obj.get('template', []): print 'Merging django template: {}'. format(f) t = _read(f) if settings.FORCE_SCRIPT_NAME: t = re.sub(r'\{%\s+url\b', settings.FORCE_SCRIPT_NAME+'{% url ', t) tmp = smart_str(get_template_from_string(t).render(Context({}))) if f.endswith('.less'): pass #TODO compilar tmp para css merge += tmp for f in obj.get('less', []): merge += doless(f) return merge
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jsMin(data, file): """ Minify JS data and saves to file. Data should be a string will whole JS content, and file will be overwrited if exists. """
print 'Minifying JS... ', url = 'http://javascript-minifier.com/raw' #POST req = urllib2.Request(url, urllib.urlencode({'input': data})) try: f = urllib2.urlopen(req) response = f.read() f.close() print 'Final: {:.1f}%'.format(100.0*len(response)/len(data)) print 'Saving: {} ({:.2f}kB)'.format(file, len(response)/1024.0) _save(file, response) except: print 'Oops!! Failed :(' return 1 return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jpgMin(file, force=False): """ Try to optimise a JPG file. The original will be saved at the same place with '.original' appended to its name. Once a .original exists the function will ignore this file unless force is True. """
if not os.path.isfile(file+'.original') or force: data = _read(file, 'rb') _save(file+'.original', data, 'w+b') print 'Optmising JPG {} - {:.2f}kB'.format(file, len(data)/1024.0), url = 'http://jpgoptimiser.com/optimise' parts, headers = encode_multipart({}, {'input': {'filename': 'wherever.jpg', 'content': data}}) req = urllib2.Request(url, data=parts, headers=headers) try: f = urllib2.urlopen(req) response = f.read() f.close() print ' - {:.2f} - {:.1f}%'.format(len(response)/1024.0, 100.0*len(response)/len(data)) _save(file, response, 'w+b') except: print 'Oops!! Failed :(' return 1 else: print 'Ignoring file: {}'.format(file) return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(obj): """ Process each block of the merger object. """
#merge all static and templates and less files merged = merge(obj) #save the full file if name defined if obj.get('full'): print 'Saving: {} ({:.2f}kB)'.format(obj['full'], len(merged)/1024.0) _save(obj['full'], merged) else: print 'Full merged size: {:.2f}kB'.format(len(merged)/1024.0) #minify js and save to file if obj.get('jsmin'): jsMin(merged, obj['jsmin']) #minify css and save to file if obj.get('cssmin'): cssMin(merged, obj['cssmin'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def optimize(exp_rets, covs): """ Return parameters for portfolio optimization. Parameters exp_rets : ndarray Vector of expected returns for each investment.. covs : ndarray Covariance matrix for the given investments. Returns --------- a : ndarray The first vector (to be combined with target return as scalar) in the linear equation for optimal weights. b : ndarray The second (constant) vector in the linear equation for optimal weights. least_risk_ret : int The return achieved on the portfolio that combines the given equities so as to achieve the lowest possible risk. Notes --------- * The length of `exp_rets` must match the number of rows and columns in the `covs` matrix. * The weights for an optimal portfolio with expected return `ret` is given by the formula `w = ret * a + b` where `a` and `b` are the vectors returned here. The weights `w` for the portfolio with lowest risk are given by `w = least_risk_ret * a + b`. * An exception will be raised if the covariance matrix is singular or if each prospective investment has the same expected return. """
def optimize(exp_rets, covs):
    """Return parameters for Markowitz-style portfolio optimization.

    The weights of the optimal portfolio with expected return `ret` are
    `w = ret * a + b`; `least_risk_ret` is the expected return of the
    minimum-variance portfolio. Raises if `covs` is singular or if all
    expected returns are equal.

    Parameters
    ----------
    exp_rets : ndarray
        Vector of expected returns, one per investment.
    covs : ndarray
        Covariance matrix of the investments.

    Returns
    -------
    a : ndarray
    b : ndarray
    least_risk_ret : float
    """
    cov_inv = np.linalg.inv(covs)
    ones = np.ones(len(exp_rets))
    # Compute the reused dot products once.
    ones_ci = ones.dot(cov_inv)
    rets_ci = exp_rets.dot(cov_inv)
    # 2x2 system from which the Lagrange multipliers are derived.
    lagrange = np.array([[rets_ci.dot(exp_rets), ones_ci.dot(exp_rets)],
                         [rets_ci.dot(ones), ones_ci.dot(ones)]])
    lagrange_inv = np.linalg.inv(lagrange)
    a = lagrange_inv[0, 0] * rets_ci + lagrange_inv[1, 0] * ones_ci
    b = lagrange_inv[0, 1] * rets_ci + lagrange_inv[1, 1] * ones_ci
    least_risk_ret = lagrange[0, 1] / lagrange[1, 1]
    return a, b, least_risk_ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def growthfromrange(rangegrowth, startdate, enddate): """ Annual growth given growth from start date to end date. """
def growthfromrange(rangegrowth, startdate, enddate):
    """Return annualized growth given total growth between two dates."""
    # Express the date span in years (365.25-day years) and delegate.
    span_secs = (pd.Timestamp(enddate) - pd.Timestamp(startdate)).total_seconds()
    yr_secs = dt.timedelta(365.25).total_seconds()
    return yrlygrowth(rangegrowth, span_secs / yr_secs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def equities(country='US'): """ Return a DataFrame of current US equities. .. versionadded:: 0.4.0 .. versionchanged:: 0.5.0 Return a DataFrame Parameters country : str, optional Country code for equities to return, defaults to 'US'. Returns ------- eqs : :class:`pandas.DataFrame` DataFrame whose index is a list of all current ticker symbols. Columns are 'Security Name' (e.g. 'Zynerba Pharmaceuticals, Inc. - Common Stock') and 'Exchange' ('NASDAQ', 'NYSE', 'NYSE MKT', etc.) Examples -------- Notes ----- Currently only US markets are supported. """
def equities(country='US'):
    """Return a DataFrame of current equities.

    Index is the ticker symbol; columns are 'Security Name' and
    'Exchange'. Only US markets are currently supported.
    """
    nasdaqblob, otherblob = _getrawdata()
    # Combine (ticker, name, exchange) triples from both feeds, then
    # sort so the resulting index is in ticker order.
    triples = list(_get_nas_triples(nasdaqblob))
    triples.extend(_get_other_triples(otherblob))
    triples.sort()
    tickers = [t[0] for t in triples]
    rows = [t[1:] for t in triples]
    return pd.DataFrame(rows, tickers, columns=['Security Name', 'Exchange'], dtype=str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def straddle(self, strike, expiry): """ Metrics for evaluating a straddle. Parameters strike : numeric Strike price. expiry : date or date str (e.g. '2015-01-01') Expiration date. Returns metrics : DataFrame Metrics for evaluating straddle. """
# Look up the call and put rows for this strike/expiry and price each
# leg at the bid/ask midpoint via _getprice().
_rows = {}
_prices = {}
for _opttype in _constants.OPTTYPES:
    _rows[_opttype] = _relevant_rows(self.data, (strike, expiry, _opttype,),
            "No key for {} strike {} {}".format(expiry, strike, _opttype))
    _prices[_opttype] = _getprice(_rows[_opttype])
# Underlying price and quote time are shared by both legs; read them
# from the first option type's row.
_eq = _rows[_constants.OPTTYPES[0]].loc[:, 'Underlying_Price'].values[0]
_qt = _rows[_constants.OPTTYPES[0]].loc[:, 'Quote_Time'].values[0]
_index = ['Call', 'Put', 'Credit', 'Underlying_Price', 'Quote_Time']
# 'Credit' is the combined premium of the two legs.
_vals = np.array([_prices['call'], _prices['put'], _prices['call'] + _prices['put'], _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(equity): """ Retrieve all current options chains for given equity. .. versionchanged:: 0.5.0 Eliminate special exception handling. Parameters equity : str Equity for which to retrieve options data. Returns optdata : :class:`~pynance.opt.core.Options` All options data for given equity currently available from Yahoo! Finance. Examples Basic usage:: To show useful information (expiration dates, stock price, quote time) when retrieving options data, you can chain the call to :func:`get` with :meth:`~pynance.opt.core.Options.info`:: Expirations: Stock: 15.93 Quote time: 2015-03-07 16:00 """
# Fetch the complete Yahoo! options chain via pandas-datareader and wrap
# the resulting DataFrame in this package's Options class.
_optmeta = pdr.data.Options(equity, 'yahoo')
_optdata = _optmeta.get_all_data()
return Options(_optdata)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(data_frame, **kwargs): """ Return a transformed DataFrame. Transform data_frame along the given axis. By default, each row will be normalized (axis=0). Parameters data_frame : DataFrame Data to be normalized. axis : int, optional 0 (default) to normalize each row, 1 to normalize each column. method : str, optional Valid methods are: - "vector" : Default for normalization by row (axis=0). Normalize along axis as a vector with norm `norm` - "last" : Linear normalization setting last value along the axis to `norm` - "first" : Default for normalization of columns (axis=1). Linear normalization setting first value along the given axis to `norm` - "mean" : Normalize so that the mean of each vector along the given axis is `norm` norm : float, optional Target value of normalization, defaults to 1.0. labels : DataFrame, optional Labels may be passed as keyword argument, in which case the label values will also be normalized and returned. Returns df : DataFrame Normalized data. labels : DataFrame, optional Normalized labels, if provided as input. Notes If labels are real-valued, they should also be normalized. .. Having row_norms as a numpy array should be benchmarked against using a DataFrame: http://stackoverflow.com/questions/12525722/normalize-data-in-pandas Note: This isn't a bottleneck. Using a feature set with 13k rows and 256 data_frame ('ge' from 1962 until now), the normalization was immediate. """
norm = kwargs.get('norm', 1.0)
axis = kwargs.get('axis', 0)
# Build the normalization vector: one value per row (axis=0) or per
# column (axis=1), using the requested method (with per-axis defaults).
if axis == 0:
    norm_vector = _get_norms_of_rows(data_frame, kwargs.get('method', 'vector'))
else:
    norm_vector = _get_norms_of_cols(data_frame, kwargs.get('method', 'first'))
if 'labels' in kwargs:
    if axis == 0:
        # Labels are scaled with the same per-row factors as the data so
        # that real-valued labels stay consistent with the features.
        return data_frame.apply(lambda col: col * norm / norm_vector, axis=0), \
                kwargs['labels'].apply(lambda col: col * norm / norm_vector, axis=0)
    else:
        # Column-wise normalization has no meaningful label counterpart.
        raise ValueError("label normalization incompatible with normalization by column")
else:
    if axis == 0:
        return data_frame.apply(lambda col: col * norm / norm_vector, axis=0)
    else:
        return data_frame.apply(lambda row: row * norm / norm_vector, axis=1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_norms_of_rows(data_frame, method): """ return a column vector containing the norm of each row """
if method == 'vector': norm_vector = np.linalg.norm(data_frame.values, axis=1) elif method == 'last': norm_vector = data_frame.iloc[:, -1].values elif method == 'mean': norm_vector = np.mean(data_frame.values, axis=1) elif method == 'first': norm_vector = data_frame.iloc[:, 0].values else: raise ValueError("no normalization method '{0}'".format(method)) return norm_vector
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, opttype, strike, expiry): """ Price as midpoint between bid and ask. Parameters opttype : str 'call' or 'put'. strike : numeric Strike price. expiry : date-like Expiration date. Can be a :class:`datetime.datetime` or a string that :mod:`pandas` can interpret as such, e.g. '2015-01-01'. Returns ------- out : float Examples -------- 0.94 """
# Locate the single row for (strike, expiry, opttype) and return the
# option price as the bid/ask midpoint computed by _getprice().
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
        "No key for {} strike {} {}".format(expiry, strike, opttype))
return _getprice(_optrow)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def metrics(self, opttype, strike, expiry): """ Basic metrics for a specific option. Parameters opttype : str ('call' or 'put') strike : numeric Strike price. expiry : date-like Expiration date. Can be a :class:`datetime.datetime` or a string that :mod:`pandas` can interpret as such, e.g. '2015-01-01'. Returns ------- out : :class:`pandas.DataFrame` """
# Single row for the requested contract.
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
        "No key for {} strike {} {}".format(expiry, strike, opttype))
_index = ['Opt_Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int',
        'Underlying_Price', 'Quote_Time']
_out = pd.DataFrame(index=_index, columns=['Value'])
# Opt_Price is the bid/ask midpoint.
_out.loc['Opt_Price', 'Value'] = _opt_price = _getprice(_optrow)
# Copy the raw quote fields straight from the source row.
for _name in _index[2:]:
    _out.loc[_name, 'Value'] = _optrow.loc[:, _name].values[0]
_eq_price = _out.loc['Underlying_Price', 'Value']
# Time value is price minus intrinsic value, which differs by type.
if opttype == 'put':
    _out.loc['Time_Val'] = _get_put_time_val(_opt_price, strike, _eq_price)
else:
    _out.loc['Time_Val'] = _get_call_time_val(_opt_price, strike, _eq_price)
return _out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def strikes(self, opttype, expiry): """ Retrieve option prices for all strikes of a given type with a given expiration. Parameters opttype : str ('call' or 'put') expiry : date-like Expiration date. Can be a :class:`datetime.datetime` or a string that :mod:`pandas` can interpret as such, e.g. '2015-01-01'. Returns df : :class:`pandas.DataFrame` eq : float Price of underlying. qt : datetime.datetime Time of quote. See Also -------- :meth:`exps` """
def strikes(self, opttype, expiry):
    """Retrieve option prices for all strikes of a given type and expiration.

    Parameters
    ----------
    opttype : str
        'call' or 'put'.
    expiry : date-like
        Expiration date, e.g. '2015-01-01'.

    Returns
    -------
    df : pandas.DataFrame
        Price, time value and raw quote fields indexed by strike.
    eq : float
        Price of underlying.
    qt : datetime.datetime
        Time of quote.
    """
    _relevant = _relevant_rows(self.data, (slice(None), expiry, opttype,),
            "No key for {} {}".format(expiry, opttype))
    _index = _relevant.index.get_level_values('Strike')
    _columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
    _df = pd.DataFrame(index=_index, columns=_columns)
    _underlying = _relevant.loc[:, 'Underlying_Price'].values[0]
    # BUG FIX: Timestamp.to_datetime() was deprecated and removed from
    # pandas; to_pydatetime() is the supported conversion to
    # datetime.datetime.
    _quotetime = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0],
            utc=True).to_pydatetime()
    # Copy raw quote fields, then derive Price as the bid/ask midpoint.
    for _col in _columns[2:]:
        _df.loc[:, _col] = _relevant.loc[:, _col].values
    _df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
    _set_tv_strike_ix(_df, opttype, 'Price', 'Time_Val', _underlying)
    return _df, _underlying, _quotetime
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exps(self, opttype, strike): """ Prices for given strike on all available dates. Parameters opttype : str ('call' or 'put') strike : numeric Returns df : :class:`pandas.DataFrame` eq : float Price of underlying. qt : :class:`datetime.datetime` Time of quote. See Also -------- :meth:`strikes` """
def exps(self, opttype, strike):
    """Prices for a given strike on all available expiration dates.

    Parameters
    ----------
    opttype : str
        'call' or 'put'.
    strike : numeric

    Returns
    -------
    df : pandas.DataFrame
        Price, time value and raw quote fields indexed by expiry.
    eq : float
        Price of underlying.
    qt : datetime.datetime
        Time of quote.
    """
    _relevant = _relevant_rows(self.data, (strike, slice(None), opttype,),
            "No key for {} {}".format(strike, opttype))
    _index = _relevant.index.get_level_values('Expiry')
    _columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
    _df = pd.DataFrame(index=_index, columns=_columns)
    _eq = _relevant.loc[:, 'Underlying_Price'].values[0]
    # BUG FIX: Timestamp.to_datetime() was deprecated and removed from
    # pandas; to_pydatetime() is the supported conversion to
    # datetime.datetime.
    _qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0],
            utc=True).to_pydatetime()
    # Copy raw quote fields, then derive Price as the bid/ask midpoint.
    for _col in _columns[2:]:
        _df.loc[:, _col] = _relevant.loc[:, _col].values
    _df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
    _set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike)
    return _df, _eq, _qt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def labeledfeatures(eqdata, featurefunc, labelfunc): """ Return features and labels for the given equity data. Each row of the features returned contains `2 * n_sessions + 1` columns (or 1 less if the constant feature is excluded). After the constant feature, if present, there will be `n_sessions` columns derived from daily growth of the given price column, which defaults to 'Adj Close'. There will then follow another `n_sessions` columns representing current volume as a multiple of average volume over the previous 252 (or other value determined by the user) sessions. The returned features are not centered or normalized because these adjustments need to be made after test or cross-validation data has been removed. The constant feature is prepended by default. The labels are derived from `eqdata` using `labelfunc`. Parameters eqdata : DataFrame Expected is a dataframe as return by the `get()` function. A column labeled 'Volume' must be present. featurefunc : function Function taking a dataframe of simple equity data as argument and returning a dataframe of features and an integer representing the number of rows that had to be skipped at the beginning of the index of the input dataframe. The rows skipped are used to synchronize the indices of features and labels. For example, if the features are composed of 4 successive daily returns, then the date of row 0 of features would be the same as the date of row 3 (counting from 0) of input data. So the corresponding `featurefunc` would return a dataframe and the value 3. labelfunc : function function for deriving labels from `eqdata`. `labelfunc` must take a single argument: `df`, a dataframe to which `labelfunc` will be applied. 
`labelfunc` should return a dataframe of labels followed by an int specifying the number of feature rows to skip at the end of the feature dataframe. For example, if features are relative prices 64 days out, these features will only be known up until 64 days before the data runs out. In order to properly align features and labels, the features should not include the last 64 rows that would otherwise be possible. Usage: `labels, skipatend = labelfunc(eqdata)` Returns ------- features : DataFrame The features derived from the given parameters. labels : DataFrame The labels derived from the given parameters. """
def labeledfeatures(eqdata, featurefunc, labelfunc):
    """Return aligned features and labels derived from `eqdata`.

    `labelfunc(eqdata)` must return `(labels, skipatend)` and
    `featurefunc(trimmed)` must return `(features, skipatstart)`; the
    two skip counts are used to synchronize the feature and label
    indices.
    """
    n_rows = len(eqdata.index)
    labels, skipatend = labelfunc(eqdata)
    # Drop the trailing rows whose labels are unknown before building
    # features, then drop the leading label rows the features lack.
    features, skipatstart = featurefunc(eqdata.iloc[:(n_rows - skipatend), :])
    return features, labels.iloc[skipatstart:, :]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def growth(interval, pricecol, eqdata): """ Retrieve growth labels. Parameters interval : int Number of sessions over which growth is measured. For example, if the value of 32 is passed for `interval`, the data returned will show the growth 32 sessions ahead for each data point. eqdata : DataFrame Data for evaluating growth. pricecol : str Column of `eqdata` to be used for prices (Normally 'Adj Close'). Returns -------- labels : DataFrame Growth labels for the specified period skipatend : int Number of rows skipped at the end of `eqdata` for the given labels. Used to synchronize labels and features. Examples """
def growth(interval, pricecol, eqdata):
    """Return growth labels for `eqdata`.

    Each label is the price `interval` sessions ahead divided by the
    current price; the final `interval` rows are therefore dropped.
    """
    n_rows = len(eqdata.index)
    prices = eqdata.loc[:, pricecol].values
    # Ratio of the future price to the current price.
    ratios = prices[interval:] / prices[:(n_rows - interval)]
    return pd.DataFrame(data=ratios, index=eqdata.index[:(n_rows - interval)],
            columns=['Growth'], dtype='float64')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sma(eqdata, **kwargs): """ simple moving average Parameters eqdata : DataFrame window : int, optional Lookback period for sma. Defaults to 20. outputcol : str, optional Column to use for output. Defaults to 'SMA'. selection : str, optional Column of eqdata on which to calculate sma. If `eqdata` has only 1 column, `selection` is ignored, and sma is calculated on that column. Defaults to 'Adj Close'. """
def sma(eqdata, **kwargs):
    """Simple moving average.

    Keyword arguments: `window` (default 20), `outputcol` (default
    'SMA'), `selection` (default 'Adj Close'; ignored when `eqdata` has
    a single column).
    """
    multi_col = len(eqdata.shape) > 1 and eqdata.shape[1] != 1
    data = eqdata.loc[:, kwargs.get('selection', 'Adj Close')] if multi_col else eqdata
    window = kwargs.get('window', 20)
    outputcol = kwargs.get('outputcol', 'SMA')
    result = pd.DataFrame(index=data.index, columns=[outputcol], dtype=np.float64)
    # Rolling mean; the first window-1 entries are NaN.
    result.loc[:, outputcol] = data.rolling(window=window, center=False).mean().values.flatten()
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ema(eqdata, **kwargs): """ Exponential moving average with the given span. Parameters eqdata : DataFrame Must have exactly 1 column on which to calculate EMA span : int, optional Span for exponential moving average. Cf. `pandas.stats.moments.ewma <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.stats.moments.ewma.html>`_ and `additional Pandas documentation <http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions>`_. outputcol : str, optional Column to use for output. Defaults to 'EMA'. selection : str, optional Column of eqdata on which to calculate ema. If `eqdata` has only 1 column, `selection` is ignored, and ema is calculated on that column. Defaults to 'Adj Close'. Returns --------- emadf : DataFrame Exponential moving average using the given `span`. """
def ema(eqdata, **kwargs):
    """Exponential moving average with the given span.

    Keyword arguments: `span` (default 20), `outputcol` (default
    'EMA'), `selection` (default 'Adj Close'; ignored when `eqdata` has
    a single column).
    """
    if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
        data = eqdata.loc[:, kwargs.get('selection', 'Adj Close')]
    else:
        data = eqdata
    span = kwargs.get('span', 20)
    outputcol = kwargs.get('outputcol', 'EMA')
    out = pd.DataFrame(index=data.index, columns=[outputcol], dtype=np.float64)
    # Adjusted EWM so early values are not biased toward the seed.
    out.loc[:, outputcol] = data.ewm(span=span, min_periods=0, adjust=True,
            ignore_na=False).mean().values.flatten()
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ema_growth(eqdata, **kwargs): """ Growth of exponential moving average. Parameters eqdata : DataFrame span : int, optional Span for exponential moving average. Defaults to 20. outputcol : str, optional. Column to use for output. Defaults to 'EMA Growth'. selection : str, optional Column of eqdata on which to calculate ema growth. If `eqdata` has only 1 column, `selection` is ignored, and ema growth is calculated on that column. Defaults to 'Adj Close'. Returns --------- out : DataFrame Growth of exponential moving average from one day to next """
def ema_growth(eqdata, **kwargs):
    """Day-over-day growth of the exponential moving average.

    Keyword arguments: `span` (default 20), `outputcol` (default
    'EMA Growth'), `selection` (default 'Adj Close').
    """
    growth_col = kwargs.get('outputcol', 'EMA Growth')
    # Compute the EMA under a fixed intermediate column name, then take
    # its growth under the requested output name.
    kwargs['outputcol'] = 'EMA'
    ema_df = ema(eqdata, **kwargs)
    return simple.growth(ema_df, selection='EMA', outputcol=growth_col)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def growth_volatility(eqdata, **kwargs): """ Return the volatility of growth. Note that, like :func:`pynance.tech.simple.growth` but in contrast to :func:`volatility`, :func:`growth_volatility` applies directly to a dataframe like that returned by :func:`pynance.data.retrieve.get`, not necessarily to a single-column dataframe. Parameters eqdata : DataFrame Data from which to extract growth volatility. An exception will be raised if `eqdata` does not contain a column 'Adj Close' or an optional name specified by the `selection` parameter. window : int, optional Window on which to calculate volatility. Defaults to 20. selection : str, optional Column of eqdata on which to calculate volatility of growth. Defaults to 'Adj Close' outputcol : str, optional Column to use for output. Defaults to 'Growth Risk'. Returns --------- out : DataFrame Dataframe showing the volatility of growth over the specified `window`. """
def growth_volatility(eqdata, **kwargs):
    """Volatility of daily growth over a rolling window.

    Keyword arguments: `window` (default 20), `selection` (default
    'Adj Close'), `outputcol` (default 'Growth Risk').
    """
    # Derive daily growth from the selected price column, then measure
    # its rolling volatility.
    growthdata = simple.growth(eqdata, selection=kwargs.get('selection', 'Adj Close'))
    return volatility(growthdata, outputcol=kwargs.get('outputcol', 'Growth Risk'),
            window=kwargs.get('window', 20))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ratio_to_ave(window, eqdata, **kwargs): """ Return values expressed as ratios to the average over some number of prior sessions. Parameters eqdata : DataFrame Must contain a column with name matching `selection`, or, if `selection` is not specified, a column named 'Volume' window : int Interval over which to calculate the average. Normally 252 (1 year) selection : str, optional Column to select for calculating ratio. Defaults to 'Volume' skipstartrows : int, optional Rows to skip at beginning in addition to the `window` rows that must be skipped to get the baseline volume. Defaults to 0. skipendrows : int, optional Rows to skip at end. Defaults to 0. outputcol : str, optional Name of column in output dataframe. Defaults to 'Ratio to Ave' Returns --------- out : DataFrame """
_selection = kwargs.get('selection', 'Volume')
_skipstartrows = kwargs.get('skipstartrows', 0)
_skipendrows = kwargs.get('skipendrows', 0)
_outputcol = kwargs.get('outputcol', 'Ratio to Ave')
_size = len(eqdata.index)
_eqdata = eqdata.loc[:, _selection]
# Rolling mean over `window` sessions, excluding the final rows that
# never feed an output ratio.
_sma = _eqdata.iloc[:-1 - _skipendrows].rolling(window=window, center=False).mean().values
# Each output value is the session's raw value divided by the average
# over the `window` sessions strictly preceding it (hence the -1
# offset into the rolling-mean array).
_outdata = _eqdata.values[window + _skipstartrows:_size - _skipendrows] /\
        _sma[window + _skipstartrows - 1:]
_index = eqdata.index[window + _skipstartrows:_size - _skipendrows]
return pd.DataFrame(_outdata, index=_index, columns=[_outputcol], dtype=np.float64)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(features, labels, regularization=0., constfeat=True): """ Run linear regression on the given data. .. versionadded:: 0.5.0 If a regularization parameter is provided, this function is a simplification and specialization of ridge regression, as implemented in `scikit-learn <http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge>`_. Setting `solver` to `'svd'` in :class:`sklearn.linear_model.Ridge` and equating our `regularization` with their `alpha` will yield the same results. Parameters features : ndarray Features on which to run linear regression. labels : ndarray Labels for the given features. Multiple columns of labels are allowed. regularization : float, optional Regularization parameter. Defaults to 0. constfeat : bool, optional Whether or not the first column of features is the constant feature 1. If True, the first column will be excluded from regularization. Defaults to True. Returns ------- model : ndarray Regression model for the given data. """
def run(features, labels, regularization=0., constfeat=True):
    """Run (optionally ridge-regularized) linear regression.

    Parameters
    ----------
    features : ndarray
        Feature matrix; a single column is also accepted.
    labels : ndarray
        Labels for the given features; multiple columns allowed.
    regularization : float, optional
        Ridge regularization parameter. Defaults to 0.
    constfeat : bool, optional
        If True, the first column of `features` is the constant feature
        1 and is excluded from regularization. Defaults to True.

    Returns
    -------
    model : ndarray
        Regression coefficients.
    """
    n_col = (features.shape[1] if len(features.shape) > 1 else 1)
    reg_matrix = regularization * np.identity(n_col, dtype='float64')
    if constfeat:
        # Do not penalize the intercept term.
        reg_matrix[0, 0] = 0.
    # Solve the regularized normal equations (X'X + reg) w = X'y.
    # http://stackoverflow.com/questions/27476933/numpy-linear-regression-with-regularization
    # FIX: pass rcond=None explicitly — omitting it relies on a
    # deprecated default and triggers a FutureWarning in modern numpy.
    return np.linalg.lstsq(features.T.dot(features) + reg_matrix,
            features.T.dot(labels), rcond=None)[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cal(self, opttype, strike, exp1, exp2): """ Metrics for evaluating a calendar spread. Parameters opttype : str ('call' or 'put') Type of option on which to collect data. strike : numeric Strike price. exp1 : date or date str (e.g. '2015-01-01') Earlier expiration date. exp2 : date or date str (e.g. '2015-01-01') Later expiration date. Returns metrics : DataFrame Metrics for evaluating spread. """
# A calendar spread requires the near expiration strictly before the far.
assert pd.Timestamp(exp1) < pd.Timestamp(exp2)
_row1 = _relevant_rows(self.data, (strike, exp1, opttype,),
        "No key for {} strike {} {}".format(exp1, strike, opttype))
_row2 = _relevant_rows(self.data, (strike, exp2, opttype,),
        "No key for {} strike {} {}".format(exp2, strike, opttype))
# Each leg is priced at the bid/ask midpoint.
_price1 = _getprice(_row1)
_price2 = _getprice(_row2)
_eq = _row1.loc[:, 'Underlying_Price'].values[0]
_qt = _row1.loc[:, 'Quote_Time'].values[0]
_index = ['Near', 'Far', 'Debit', 'Underlying_Price', 'Quote_Time']
# 'Debit' is the net cost: buy the far expiry, sell the near one.
_vals = np.array([_price1, _price2, _price2 - _price1, _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expand(fn, col, inputtype=pd.DataFrame): """ Wrap a function applying to a single column to make a function applying to a multi-dimensional dataframe or ndarray Parameters fn : function Function that applies to a series or vector. col : str or int Index of column to which to apply `fn`. inputtype : class or type Type of input to be expected by the wrapped function. Normally pd.DataFrame or np.ndarray. Defaults to pd.DataFrame. Returns wrapped : function Function that takes an input of type `inputtype` and applies `fn` to the specified `col`. """
def expand(fn, col, inputtype=pd.DataFrame):
    """Lift a single-column function to a multi-column input.

    `col` selects the column handed to `fn` as first argument: by
    position for ndarray input (or an int with DataFrame input), by
    label otherwise. Raises TypeError for an unsupported `inputtype`.
    """
    if inputtype == pd.DataFrame:
        if isinstance(col, int):
            def _on_iloc(*args, **kwargs):
                return fn(args[0].iloc[:, col], *args[1:], **kwargs)
            return _on_iloc
        def _on_loc(*args, **kwargs):
            return fn(args[0].loc[:, col], *args[1:], **kwargs)
        return _on_loc
    if inputtype == np.ndarray:
        def _on_array(*args, **kwargs):
            return fn(args[0][:, col], *args[1:], **kwargs)
        return _on_array
    raise TypeError("invalid input type")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_na(eqdata): """ Return false if `eqdata` contains no missing values. Parameters eqdata : DataFrame or ndarray Data to check for missing values (NaN, None) Returns answer : bool False iff `eqdata` contains no missing values. """
def has_na(eqdata):
    """Return True iff `eqdata` (DataFrame or ndarray) contains any
    missing value (NaN or None)."""
    values = eqdata.values if isinstance(eqdata, pd.DataFrame) else eqdata
    # Count the null entries via a boolean mask.
    return len(values[pd.isnull(values)]) > 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_const(features): """ Prepend the constant feature 1 as first feature and return the modified feature set. Parameters features : ndarray or DataFrame """
def add_const(features):
    """Return `features` with a constant column of 1s prepended.

    Accepts an ndarray or DataFrame and returns the same kind; for
    DataFrame input the new first column is labeled 'Constant'.
    """
    out = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
    out[:, 0] = 1.
    if isinstance(features, np.ndarray):
        out[:, 1:] = features
        return out
    out[:, 1:] = features.iloc[:, :].values
    return pd.DataFrame(data=out, index=features.index,
            columns=['Constant'] + features.columns.tolist(), dtype='float64')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromcols(selection, n_sessions, eqdata, **kwargs): """ Generate features from selected columns of a dataframe. Parameters selection : list or tuple of str Columns to be used as features. n_sessions : int Number of sessions over which to create features. eqdata : DataFrame Data from which to generate feature set. Must contain as columns the values from which the features are to be generated. constfeat : bool, optional Whether or not the returned features will have the constant feature. Returns features : DataFrame """
_constfeat = kwargs.get('constfeat', True)
_outcols = ['Constant'] if _constfeat else []
_n_rows = len(eqdata.index)
# Build one output label per (source column, session offset) pair;
# presumably _concat joins the offset and ' <col>' into labels like
# '-2 Adj Close' — verify against _concat's definition.
for _col in selection:
    _outcols += map(partial(_concat, strval=' ' + _col), range(-n_sessions + 1, 1))
# The first n_sessions - 1 rows lack enough history and are dropped.
_features = pd.DataFrame(index=eqdata.index[n_sessions - 1:],
        columns=_outcols, dtype=np.float64)
_offset = 0
if _constfeat:
    # Constant feature 1 occupies the first column.
    _features.iloc[:, 0] = 1.
    _offset += 1
# Feature column i within each group holds the source values shifted
# (n_sessions - 1 - i) sessions back.
for _col in selection:
    _values = eqdata.loc[:, _col].values
    for i in range(n_sessions):
        _features.iloc[:, _offset + i] = _values[i:_n_rows - n_sessions + i + 1]
    _offset += n_sessions
return _features
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromfuncs(funcs, n_sessions, eqdata, **kwargs): """ Generate features using a list of functions to apply to input data Parameters funcs : list of function Functions to apply to eqdata. Each function is expected to output a dataframe with index identical to a slice of `eqdata`. The slice must include at least `eqdata.index[skipatstart + n_sessions - 1:]`. Each function is also expected to have a function attribute `title`, which is used to generate the column names of the output features. n_sessions : int Number of sessions over which to create features. eqdata : DataFrame Data from which to generate features. The data will often be retrieved using `pn.get()`. constfeat : bool, optional Whether or not the returned features will have the constant feature. skipatstart : int, optional Number of rows to omit at the start of the output DataFrame. This parameter is necessary if any of the functions requires a rampup period before returning valid results, e.g. `sma()` or functions calculating volume relative to a past baseline. Defaults to 0. Returns features : DataFrame """
_skipatstart = kwargs.get('skipatstart', 0)
_constfeat = kwargs.get('constfeat', True)
_outcols = ['Constant'] if _constfeat else []
_n_allrows = len(eqdata.index)
# Rows available after dropping the rampup prefix and the history
# needed for the first feature row.
_n_featrows = _n_allrows - _skipatstart - n_sessions + 1
# One output label per (function, session offset) pair; presumably
# _concat joins the offset with the function's title attribute —
# verify against _concat's definition.
for _func in funcs:
    _outcols += map(partial(_concat, strval=' ' + _func.title), range(-n_sessions + 1, 1))
_features = pd.DataFrame(index=eqdata.index[_skipatstart + n_sessions - 1:],
        columns=_outcols, dtype=np.float64)
_offset = 0
if _constfeat:
    # Constant feature 1 occupies the first column.
    _features.iloc[:, 0] = 1.
    _offset += 1
# For each function, column i holds its output shifted back so that
# every feature row sees n_sessions consecutive values.
for _func in funcs:
    _values = _func(eqdata).values
    _n_values = len(_values)
    for i in range(n_sessions):
        _val_end = _n_values - n_sessions + i + 1
        _features.iloc[:, _offset + i] = _values[_val_end - _n_featrows:_val_end]
    _offset += n_sessions
return _features
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ln_growth(eqdata, **kwargs): """ Return the natural log of growth. See also -------- :func:`growth` """
def ln_growth(eqdata, **kwargs):
    """Return the natural log of growth; see `growth`."""
    # Default the output column name without clobbering a caller value.
    kwargs.setdefault('outputcol', 'LnGrowth')
    return np.log(growth(eqdata, **kwargs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mse(predicted, actual): """ Mean squared error of predictions. .. versionadded:: 0.5.0 Parameters predicted : ndarray Predictions on which to measure error. May contain a single or multiple column but must match `actual` in shape. actual : ndarray Actual values against which to measure predictions. Returns ------- err : ndarray Mean squared error of predictions relative to actual values. """
def mse(predicted, actual):
    """
    Mean squared error of predictions.

    Parameters
    ----------
    predicted : ndarray
        Predictions on which to measure error; must match `actual` in shape.
    actual : ndarray
        Actual values against which to measure predictions.

    Returns
    -------
    err : ndarray
        Mean squared error of predictions relative to actual values,
        averaged over axis 0 (one value per column).
    """
    residuals = predicted - actual
    return np.average(np.square(residuals), axis=0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(eqprice, callprice, strike, shares=1, buycomm=0., excomm=0., dividend=0.): """ Metrics for covered calls. Parameters eqprice : float Price at which stock is purchased. callprice : float Price for which call is sold. strike : float Strike price of call sold. shares : int, optional Number of shares of stock. Defaults to 1. buycomm : float, optional Commission paid on total initial purchase. excomm : float optional Commission to be paid if option is exercised. dividend : float, optional Total dividends per share expected between purchase and expiration. Returns metrics : :class:`pandas.DataFrame` Investment metrics Notes ----- Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed., p. 43 """
def get(eqprice, callprice, strike, shares=1, buycomm=0., excomm=0., dividend=0.):
    """
    Metrics for covered calls.

    Returns a one-column DataFrame of investment metrics: cost, premium,
    commissions, profit/return if exercised or unchanged, break-even price
    and downside protection.

    Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed., p. 43.
    """
    _index = ['Eq Cost', 'Option Premium', 'Commission', 'Total Invested',
              'Dividends', 'Eq if Ex', 'Comm if Ex', 'Profit if Ex',
              'Ret if Ex', 'Profit if Unch', 'Ret if Unch',
              'Break_Even Price', 'Protection Pts', 'Protection Pct']
    _metrics = pd.DataFrame(index=_index, columns=['Value'])
    _shares = float(shares)
    # Total dividends expected over the holding period.
    _dividends = _shares * dividend
    _metrics.loc['Eq Cost', 'Value'] = _eqcost = _shares * eqprice
    _metrics.loc['Option Premium', 'Value'] = _optprem = _shares * callprice
    _metrics.loc['Commission', 'Value'] = float(buycomm)
    # Net outlay: stock cost less premium received, plus purchase commission.
    _metrics.loc['Total Invested', 'Value'] = _invested = _eqcost - _optprem + buycomm
    _metrics.loc['Dividends', 'Value'] = _dividends
    # Proceeds if the call is exercised: stock called away at the strike.
    _metrics.loc['Eq if Ex', 'Value'] = _eqsale = strike * _shares
    _metrics.loc['Comm if Ex', 'Value'] = float(excomm)
    _metrics.loc['Profit if Ex', 'Value'] = _profitex = _eqsale + _dividends - _invested - excomm
    _metrics.loc['Ret if Ex', 'Value'] = round(_profitex / _invested, _constants.NDIGITS_SIG)
    # "Unchanged" scenario: stock finishes at the purchase price, call expires.
    _metrics.loc['Profit if Unch', 'Value'] = _profitunch = _eqcost + _dividends - _invested
    _metrics.loc['Ret if Unch', 'Value'] = round(_profitunch / _invested, _constants.NDIGITS_SIG)
    _metrics.loc['Break_Even Price', 'Value'] = _breakeven = round((_invested - _dividends) / _shares,
                                                                   _constants.NDIGITS_SIG)
    # Downside protection: distance (and fraction) from cost to break-even.
    _metrics.loc['Protection Pts', 'Value'] = _protpts = eqprice - _breakeven
    _metrics.loc['Protection Pct', 'Value'] = round(_protpts / eqprice, _constants.NDIGITS_SIG)
    return _metrics
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_bday(date, bday=None): """ Return true iff the given date is a business day. Parameters date : :class:`pandas.Timestamp` Any value that can be converted to a pandas Timestamp--e.g., '2012-05-01', dt.datetime(2012, 5, 1, 3) bday : :class:`pandas.tseries.offsets.CustomBusinessDay` Defaults to `CustomBusinessDay(calendar=USFederalHolidayCalendar())`. Pass this parameter in performance-sensitive contexts, such as when calling this function in a loop. The creation of the `CustomBusinessDay` object is the performance bottleneck of this function. Cf. `pandas.tseries.offsets.CustomBusinessDay <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#custom-business-days-experimental>`_. Returns ------- val : bool True iff `date` is a business day """
def is_bday(date, bday=None):
    """
    Return True iff the given date is a business day.

    Parameters
    ----------
    date : convertible to :class:`pandas.Timestamp`
        e.g. '2012-05-01' or a ``datetime``.
    bday : :class:`pandas.tseries.offsets.CustomBusinessDay`, optional
        Pass a prebuilt offset in performance-sensitive loops; constructing
        the default `CustomBusinessDay` is the bottleneck of this function.

    Returns
    -------
    val : bool
    """
    ts = Timestamp(date)
    offset = bday if bday is not None else CustomBusinessDay(
        calendar=USFederalHolidayCalendar())
    # Rolling one business day forward and then back returns to `ts`
    # exactly when `ts` itself is a business day.
    return ts == (ts + offset) - offset
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compare(eq_dfs, columns=None, selection='Adj Close'): """ Get the relative performance of multiple equities. .. versionadded:: 0.5.0 Parameters eq_dfs : list or tuple of DataFrame Performance data for multiple equities over a consistent time frame. columns : iterable of str, default None Labels to use for the columns of the output DataFrame. The labels, if provided, should normally be the names of the equities whose performance is being compared. selection : str, default 'Adj Close' Column containing prices to be compared. Defaults to 'Adj Close'. Returns ------- rel_perf : DataFrame A DataFrame whose columns contain normalized data for each equity represented in `eq_dfs`. The initial price for each equity will be normalized to 1.0. Examples -------- .. code-block:: python import pynance as pn eqs = ('FSLR', 'SCTY', 'SPWR') eq_dfs = [] for eq in eqs: eq_dfs.append(pn.data.get(eq, '2016')) rel_perf = pn.data.compare(eq_dfs, eqs) Notes ----- Each set of data passed in `eq_dfs` is assumed to have the same start and end dates as the other data sets. """
def compare(eq_dfs, columns=None, selection='Adj Close'):
    """
    Get the relative performance of multiple equities.

    Parameters
    ----------
    eq_dfs : list or tuple of DataFrame
        Performance data for multiple equities over a consistent time frame.
    columns : iterable of str, default None
        Labels for the output columns (normally the equity names).
    selection : str, default 'Adj Close'
        Column containing prices to be compared.

    Returns
    -------
    rel_perf : DataFrame
        Prices for each equity normalized so the first value is 1.0.
    """
    n_rows = eq_dfs[0].shape[0]
    buf = np.empty((n_rows, len(eq_dfs)), dtype=np.float64)
    rel_perf = pd.DataFrame(buf, eq_dfs[0].index, columns, dtype=np.float64)
    for col_ix, eq_df in enumerate(eq_dfs):
        # Normalize each series by its own initial price.
        initial = eq_df.iloc[0].loc[selection]
        rel_perf.iloc[:, col_ix] = eq_df.loc[:, selection] / initial
    return rel_perf
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diagbtrfly(self, lowstrike, midstrike, highstrike, expiry1, expiry2): """ Metrics for evaluating a diagonal butterfly spread. Parameters opttype : str ('call' or 'put') Type of option on which to collect data. lowstrike : numeric Lower strike price. To be used for far put. midstrike : numeric Middle strike price. To be used for near straddle. Typically at the money. highstrike : numeric Higher strike price. To be used for far call. expiry1 : date or date str (e.g. '2015-01-01') Earlier expiration date. expiry2 : date or date str (e.g. '2015-01-01') Later expiration date. Returns metrics : DataFrame Metrics for evaluating spread. """
def diagbtrfly(self, lowstrike, midstrike, highstrike, expiry1, expiry2):
    """
    Metrics for evaluating a diagonal butterfly spread.

    Sells a near-term straddle at `midstrike` (expiry1) against a far call at
    `highstrike` and far put at `lowstrike` (expiry2). Returns a one-column
    DataFrame of the component prices, totals, their ratio and the net credit.
    """
    # Strike and expiry ordering is a precondition of the strategy.
    assert lowstrike < midstrike
    assert midstrike < highstrike
    assert pd.Timestamp(expiry1) < pd.Timestamp(expiry2)
    _rows1 = {}
    _rows2 = {}
    _prices1 = {}
    _prices2 = {}
    _index = ['Straddle Call', 'Straddle Put', 'Straddle Total', 'Far Call',
              'Far Put', 'Far Total', 'Straddle to Far Ratio', 'Credit',
              'Underlying_Price', 'Quote_Time']
    _metrics = pd.DataFrame(index=_index, columns=['Value'])
    _errmsg = "No key for {} strike {} {}"
    # Near straddle call (expiry1 @ midstrike) and far call (expiry2 @ highstrike).
    _opttype = 'call'
    _rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
                                      _errmsg.format(expiry1, midstrike, _opttype))
    _prices1[_opttype] = _getprice(_rows1[_opttype])
    _rows2[_opttype] = _relevant_rows(self.data, (highstrike, expiry2, _opttype),
                                      _errmsg.format(expiry2, highstrike, _opttype))
    _prices2[_opttype] = _getprice(_rows2[_opttype])
    _metrics.loc['Straddle Call', 'Value'] = _prices1[_opttype]
    _metrics.loc['Far Call', 'Value'] = _prices2[_opttype]
    # Underlying price and quote time are taken from the near-call row.
    _metrics.loc['Underlying_Price', 'Value'], _metrics.loc['Quote_Time', 'Value'] =\
        _getkeys(_rows1[_opttype], ['Underlying_Price', 'Quote_Time'])
    # Near straddle put (expiry1 @ midstrike) and far put (expiry2 @ lowstrike).
    _opttype = 'put'
    _rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
                                      _errmsg.format(expiry1, midstrike, _opttype))
    _prices1[_opttype] = _getprice(_rows1[_opttype])
    _rows2[_opttype] = _relevant_rows(self.data, (lowstrike, expiry2, _opttype),
                                      _errmsg.format(expiry2, lowstrike, _opttype))
    _prices2[_opttype] = _getprice(_rows2[_opttype])
    _metrics.loc['Straddle Put', 'Value'] = _prices1[_opttype]
    _metrics.loc['Far Put', 'Value'] = _prices2[_opttype]
    # Aggregate legs: the spread is sold near (credit) against far (debit).
    _metrics.loc['Straddle Total', 'Value'] = _neartot = sum(_prices1.values())
    _metrics.loc['Far Total', 'Value'] = _fartot = sum(_prices2.values())
    _metrics.loc['Straddle to Far Ratio', 'Value'] = _neartot / _fartot
    _metrics.loc['Credit', 'Value'] = _neartot - _fartot
    return _metrics
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def info(self): """ Show expiration dates, equity price, quote time. Returns ------- self : :class:`~pynance.opt.core.Options` Returns a reference to the calling object to allow chaining. expiries : :class:`pandas.tseries.index.DatetimeIndex` Examples -------- Expirations: Stock: 16.25 Quote time: 2015-03-01 16:00 """
print("Expirations:") _i = 0 for _datetime in self.data.index.levels[1].to_pydatetime(): print("{:2d} {}".format(_i, _datetime.strftime('%Y-%m-%d'))) _i += 1 print("Stock: {:.2f}".format(self.data.iloc[0].loc['Underlying_Price'])) print("Quote time: {}".format(self.quotetime().strftime('%Y-%m-%d %H:%M%z'))) return self, self.exps()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tolist(self): """ Return the array as a list of rows. Each row is a `dict` of values. Facilitates inserting data into a database. .. versionadded:: 0.3.1 Returns ------- quotes : list A list in which each entry is a dictionary representing a single options quote. """
def tolist(self):
    """
    Return the array as a list of rows, each row a `dict` of values.

    Facilitates inserting data into a database.

    Returns
    -------
    quotes : list
        Each entry is a dictionary representing a single options quote.
    """
    quotes = []
    # One dict per index key, built from the corresponding row.
    for key in self.data.index:
        quotes.append(_todict(key, self.data.loc[key, :]))
    return quotes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_username(self): """ Generate a unique username """
def _generate_username(self):
    """
    Generate a unique username.

    Builds a 30-character candidate from a UUID (dashes stripped, last two
    hex chars dropped) so it fits the 30-char ``User.username`` field, and
    retries on the unlikely event of a collision.
    """
    while True:
        # uuid4().hex == str(uuid4()) with dashes removed (32 chars);
        # trim two chars to fit the 30-char username column.
        candidate = uuid.uuid4().hex[:-2]
        try:
            User.objects.get(username=candidate)
        except User.DoesNotExist:
            # No existing user has this name -> it is safe to use.
            return candidate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_model_cache(table_name): """ Updates model cache by generating a new key for the model """
def update_model_cache(table_name):
    """
    Update model cache by generating a new key for the model.

    Parameters
    ----------
    table_name : str
        Database table of the model whose cache entries are invalidated.
    """
    # Publishing a fresh random key implicitly invalidates every query key
    # derived from the old one.
    fresh_key = uuid.uuid4().hex
    model_cache_backend.share_model_cache_info(
        ModelCacheInfo(table_name, fresh_key))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invalidate_model_cache(sender, instance, **kwargs): """ Signal receiver for models to invalidate model cache of sender and related models. Model cache is invalidated by generating new key for each model. Parameters ~~~~~~~~~~ sender The model class instance The actual instance being saved. """
def invalidate_model_cache(sender, instance, **kwargs):
    """
    Signal receiver (post_save/post_delete) that invalidates the model cache
    of `sender` and all related models by issuing each a new cache key.
    """
    logger.debug('Received post_save/post_delete signal from sender {0}'.format(sender))
    # Django >= 1.8 exposes relations through get_fields(); older versions
    # use the deprecated get_all_related_objects() API.
    if django.VERSION >= (1, 8):
        related_tables = set(
            [f.related_model._meta.db_table for f in sender._meta.get_fields()
             if f.related_model is not None
             and (((f.one_to_many or f.one_to_one) and f.auto_created)
                  or f.many_to_one
                  or (f.many_to_many and not f.auto_created))])
    else:
        related_tables = set([rel.model._meta.db_table
                              for rel in sender._meta.get_all_related_objects()])
    # temporary fix for m2m relations with an intermediate model, goes away after better join caching
    related_tables |= set([field.rel.to._meta.db_table
                           for field in sender._meta.fields
                           if issubclass(type(field), RelatedField)])
    logger.debug('Related tables of sender {0} are {1}'.format(sender, related_tables))
    # Rotate the sender's key, then each related model's key.
    update_model_cache(sender._meta.db_table)
    for related_table in related_tables:
        update_model_cache(related_table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invalidate_m2m_cache(sender, instance, model, **kwargs): """ Signal receiver for models to invalidate model cache for many-to-many relationship. Parameters ~~~~~~~~~~ sender The model class instance The instance whose many-to-many relation is updated. model The class of the objects that are added to, removed from or cleared from the relation. """
def invalidate_m2m_cache(sender, instance, model, **kwargs):
    """
    Signal receiver invalidating the model cache for a many-to-many change.

    Parameters
    ----------
    sender
        The model class of the through table.
    instance
        The instance whose many-to-many relation is updated.
    model
        The class of the objects added to, removed from or cleared
        from the relation.
    """
    logger.debug('Received m2m_changed signals from sender {0}'.format(sender))
    # Both sides of the relation must get fresh cache keys.
    for table in (instance._meta.db_table, model._meta.db_table):
        update_model_cache(table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_key(self): """ Generate cache key for the current query. If a new key is created for the model it is then shared with other consumers. """
def generate_key(self):
    """
    Generate the cache key for the current query.

    A per-model key is combined with the rendered SQL and the database
    alias, then hashed. When a new model key is created it is shared with
    other consumers so they invalidate consistently.
    """
    sql = self.sql()
    key, created = self.get_or_create_model_key()
    if created:
        # Newly minted model key: publish it so other processes agree.
        db_table = self.model._meta.db_table
        logger.debug('created new key {0} for model {1}'.format(key, db_table))
        model_cache_backend.share_model_cache_info(ModelCacheInfo(db_table, key))
    raw_key = u'{model_key}{qs}{db}'.format(model_key=key, qs=sql, db=self.db)
    return hashlib.md5(raw_key.encode('utf-8')).hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sql(self): """ Get sql for the current query. """
def sql(self):
    """
    Return the SQL for the current query with parameters interpolated.

    Used only for cache-key generation, not for execution.
    """
    # Clone so compiling never mutates the live query object.
    compiler = self.query.clone().get_compiler(using=self.db)
    sql, params = compiler.as_sql()
    return sql % params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_or_create_model_key(self): """ Get or create key for the model. Returns ~~~~~~~ (model_key, boolean) tuple """
def get_or_create_model_key(self):
    """
    Get or create the cache key for the model.

    Returns
    -------
    (model_key, created) : tuple of (str, bool)
        `created` is True when a brand-new key was generated.
    """
    info = model_cache_backend.retrieve_model_cache_info(
        self.model._meta.db_table)
    if info:
        # Reuse the shared key already published for this table.
        return info.table_key, False
    return uuid.uuid4().hex, True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invalidate_model_cache(self): """ Invalidate model cache by generating new key for the model. """
def invalidate_model_cache(self):
    """
    Invalidate the model cache by generating a new key for this model and
    every related model.
    """
    logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
    # Django >= 1.8 exposes relations through get_fields(); older versions
    # use the deprecated get_all_related_objects() API.
    if django.VERSION >= (1, 8):
        related_tables = set(
            [f.related_model._meta.db_table for f in self.model._meta.get_fields()
             if ((f.one_to_many or f.one_to_one) and f.auto_created)
             or f.many_to_one
             or (f.many_to_many and not f.auto_created)])
    else:
        related_tables = set([rel.model._meta.db_table
                              for rel in self.model._meta.get_all_related_objects()])
    # temporary fix for m2m relations with an intermediate model, goes away after better join caching
    related_tables |= set([field.rel.to._meta.db_table
                           for field in self.model._meta.fields
                           if issubclass(type(field), RelatedField)])
    logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
    # Rotate this model's key, then each related model's key.
    update_model_cache(self.model._meta.db_table)
    for related_table in related_tables:
        update_model_cache(related_table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cache_backend(self): """ Get the cache backend Returns ~~~~~~~ Django cache backend """
def cache_backend(self):
    """
    Get the Django cache backend, resolving and memoizing it on first use.

    Returns
    -------
    Django cache backend
    """
    try:
        return self._cache_backend
    except AttributeError:
        # First access: resolve via the API available in this Django version.
        if hasattr(django.core.cache, 'caches'):
            backend = django.core.cache.caches[_cache_name]
        else:
            backend = django.core.cache.get_cache(_cache_name)
        self._cache_backend = backend
        return backend
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_file(filename): """ Import a file that will trigger the population of Orca. Parameters filename : str """
def import_file(filename):
    """
    Import a file that will trigger the population of Orca.

    Parameters
    ----------
    filename : str
        Path to a ``.py`` module whose import registers Orca components.
    """
    # NOTE(review): `imp` is deprecated in favor of `importlib`; kept here
    # for compatibility with the Python versions this project targets.
    pathname, filename = os.path.split(filename)
    # NOTE(review): re.match returns None when the name is not a simple
    # `\w+.py` (e.g. contains dashes), which would raise AttributeError —
    # confirm callers only pass plain module filenames.
    modname = re.match(
        r'(?P<modname>\w+)\.py', filename).group('modname')
    file, path, desc = imp.find_module(modname, [pathname])
    try:
        imp.load_module(modname, file, path, desc)
    finally:
        # Always release the file handle opened by find_module.
        file.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_is_table(func): """ Decorator that will check whether the "table_name" keyword argument to the wrapped function matches a registered Orca table. """
def check_is_table(func):
    """
    Decorator that 404s unless the wrapped view's ``table_name`` keyword
    argument names a registered Orca table.
    """
    @wraps(func)
    def wrapper(**kwargs):
        # abort raises, so reaching the call implies the table exists.
        if orca.is_table(kwargs['table_name']):
            return func(**kwargs)
        abort(404)
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_is_column(func): """ Decorator that will check whether the "table_name" and "col_name" keyword arguments to the wrapped function match a registered Orca table and column. """
def check_is_column(func):
    """
    Decorator that 404s unless the wrapped view's ``table_name`` and
    ``col_name`` keyword arguments name a registered Orca table and one
    of its columns.
    """
    @wraps(func)
    def wrapper(**kwargs):
        table, column = kwargs['table_name'], kwargs['col_name']
        if not orca.is_table(table):
            abort(404)
        if column not in orca.get_table(table).columns:
            abort(404)
        return func(**kwargs)
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_is_injectable(func): """ Decorator that will check whether the "inj_name" keyword argument to the wrapped function matches a registered Orca injectable. """
def check_is_injectable(func):
    """
    Decorator that 404s unless the wrapped view's ``inj_name`` keyword
    argument names a registered Orca injectable.
    """
    @wraps(func)
    def wrapper(**kwargs):
        if orca.is_injectable(kwargs['inj_name']):
            return func(**kwargs)
        abort(404)
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def schema(): """ All tables, columns, steps, injectables and broadcasts registered with Orca. Includes local columns on tables. """
def schema():
    """
    All tables, columns, steps, injectables and broadcasts registered with
    Orca, as JSON. Includes local columns on tables.
    """
    tables = orca.list_tables()
    return jsonify(
        tables=tables,
        columns={name: orca.get_table(name).columns for name in tables},
        steps=orca.list_steps(),
        injectables=orca.list_injectables(),
        broadcasts=orca.list_broadcasts())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_preview(table_name): """ Returns the first five rows of a table as JSON. Includes all columns. Uses Pandas' "split" JSON format. """
def table_preview(table_name):
    """
    Return the first five rows of a table as JSON, including all columns,
    in Pandas' "split" JSON format.
    """
    frame = orca.get_table(table_name).to_frame()
    body = frame.head().to_json(orient='split', date_format='iso')
    return body, 200, {'Content-Type': 'application/json'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_describe(table_name): """ Return summary statistics of a table as JSON. Includes all columns. Uses Pandas' "split" JSON format. """
def table_describe(table_name):
    """
    Return summary statistics of a table as JSON, including all columns,
    in Pandas' "split" JSON format.
    """
    frame = orca.get_table(table_name).to_frame()
    body = frame.describe().to_json(orient='split', date_format='iso')
    return body, 200, {'Content-Type': 'application/json'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_definition(table_name): """ Get the source of a table function. If a table is registered DataFrame and not a function then all that is returned is {'type': 'dataframe'}. If the table is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments. """
def table_definition(table_name):
    """
    Get the source of a table function as JSON.

    A registered DataFrame yields only {'type': 'dataframe'}; a registered
    function yields "type", "filename", "lineno", "text" (raw source) and
    "html" (Pygments-highlighted source).
    """
    if orca.table_type(table_name) == 'dataframe':
        return jsonify(type='dataframe')
    filename, lineno, source = \
        orca.get_raw_table(table_name).func_source_data()
    return jsonify(
        type='function',
        filename=filename,
        lineno=lineno,
        text=source,
        html=highlight(source, PythonLexer(), HtmlFormatter()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_groupbyagg(table_name): """ Perform a groupby on a table and return an aggregation on a single column. This depends on some request parameters in the URL. "column" and "agg" must always be present, and one of "by" or "level" must be present. "column" is the table column on which aggregation will be performed, "agg" is the aggregation that will be performed, and "by"/"level" define how to group the data. Supported "agg" parameters are: mean, median, std, sum, and size. """
table = orca.get_table(table_name) # column to aggregate column = request.args.get('column', None) if not column or column not in table.columns: abort(400) # column or index level to group by by = request.args.get('by', None) level = request.args.get('level', None) if (not by and not level) or (by and level): abort(400) # aggregation type agg = request.args.get('agg', None) if not agg or agg not in _GROUPBY_AGG_MAP: abort(400) column = table.get_column(column) # level can either be an integer level number or a string level name. # try converting to integer, but if that doesn't work # we go ahead with the string. if level: try: level = int(level) except ValueError: pass gby = column.groupby(level=level) else: by = table.get_column(by) gby = column.groupby(by) result = _GROUPBY_AGG_MAP[agg](gby) return ( result.to_json(orient='split', date_format='iso'), 200, {'Content-Type': 'application/json'})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def column_preview(table_name, col_name): """ Return the first ten elements of a column as JSON in Pandas' "split" format. """
def column_preview(table_name, col_name):
    """
    Return the first ten elements of a column as JSON in Pandas'
    "split" format.
    """
    head = orca.get_table(table_name).get_column(col_name).head(10)
    body = head.to_json(orient='split', date_format='iso')
    return body, 200, {'Content-Type': 'application/json'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def column_definition(table_name, col_name): """ Get the source of a column function. If a column is a registered Series and not a function then all that is returned is {'type': 'series'}. If the column is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments. """
def column_definition(table_name, col_name):
    """
    Get the source of a column function as JSON.

    A registered Series yields only its type; a registered function yields
    "type", "filename", "lineno", "text" (raw source) and "html"
    (Pygments-highlighted source).
    """
    col_type = orca.get_table(table_name).column_type(col_name)
    if col_type != 'function':
        return jsonify(type=col_type)
    filename, lineno, source = \
        orca.get_raw_column(table_name, col_name).func_source_data()
    return jsonify(
        type='function',
        filename=filename,
        lineno=lineno,
        text=source,
        html=highlight(source, PythonLexer(), HtmlFormatter()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def column_describe(table_name, col_name): """ Return summary statistics of a column as JSON. Uses Pandas' "split" JSON format. """
def column_describe(table_name, col_name):
    """
    Return summary statistics of a column as JSON in Pandas'
    "split" format.
    """
    stats = orca.get_table(table_name).get_column(col_name).describe()
    return stats.to_json(orient='split'), 200, {'Content-Type': 'application/json'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def column_csv(table_name, col_name): """ Return a column as CSV using Pandas' default CSV output. """
def column_csv(table_name, col_name):
    """
    Return a column as CSV using Pandas' default CSV output.
    """
    # NOTE(review): Series.to_csv's `path` keyword was replaced by
    # `path_or_buf` in modern pandas; this call works only on the older
    # pandas releases this project pins — confirm the pinned version.
    csv = orca.get_table(table_name).get_column(col_name).to_csv(path=None)
    return csv, 200, {'Content-Type': 'text/csv'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def injectable_repr(inj_name): """ Returns the type and repr of an injectable. JSON response has "type" and "repr" keys. """
def injectable_repr(inj_name):
    """
    Return the type and repr of an injectable as JSON with "type" and
    "repr" keys.
    """
    injectable = orca.get_injectable(inj_name)
    return jsonify(type=str(type(injectable)), repr=repr(injectable))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def injectable_definition(inj_name): """ Get the source of an injectable function. If an injectable is a registered Python variable and not a function then all that is returned is {'type': 'variable'}. If the column is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments. """
def injectable_definition(inj_name):
    """
    Get the source of an injectable function as JSON.

    A plain variable yields only {'type': 'variable'}; a registered
    function yields "type", "filename", "lineno", "text" (raw source)
    and "html" (Pygments-highlighted source).
    """
    if orca.injectable_type(inj_name) == 'variable':
        return jsonify(type='variable')
    filename, lineno, source = \
        orca.get_injectable_func_source_data(inj_name)
    return jsonify(
        type='function',
        filename=filename,
        lineno=lineno,
        text=source,
        html=highlight(source, PythonLexer(), HtmlFormatter()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_broadcasts(): """ List all registered broadcasts as a list of objects with keys "cast" and "onto". """
def list_broadcasts():
    """
    List all registered broadcasts as JSON: a list of objects with keys
    "cast" and "onto".
    """
    casts = []
    for pair in orca.list_broadcasts():
        casts.append({'cast': pair[0], 'onto': pair[1]})
    return jsonify(broadcasts=casts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def broadcast_definition(cast_name, onto_name): """ Return the definition of a broadcast as an object with keys "cast", "onto", "cast_on", "onto_on", "cast_index", and "onto_index". These are the same as the arguments to the ``broadcast`` function. """
def broadcast_definition(cast_name, onto_name):
    """
    Return the definition of a broadcast as JSON with keys "cast", "onto",
    "cast_on", "onto_on", "cast_index" and "onto_index" — the same as the
    arguments to ``broadcast``.
    """
    if not orca.is_broadcast(cast_name, onto_name):
        abort(404)
    bcast = orca.get_broadcast(cast_name, onto_name)
    return jsonify(
        cast=bcast.cast,
        onto=bcast.onto,
        cast_on=bcast.cast_on,
        onto_on=bcast.onto_on,
        cast_index=bcast.cast_index,
        onto_index=bcast.onto_index)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def step_definition(step_name): """ Get the source of a step function. Returned object has keys "filename", "lineno", "text" and "html". "text" is the raw text of the function, "html" has been marked up by Pygments. """
def step_definition(step_name):
    """
    Get the source of a step function as JSON with keys "filename",
    "lineno", "text" (raw source) and "html" (Pygments-highlighted source).
    """
    if not orca.is_step(step_name):
        abort(404)
    filename, lineno, source = \
        orca.get_step(step_name).func_source_data()
    return jsonify(
        filename=filename,
        lineno=lineno,
        text=source,
        html=highlight(source, PythonLexer(), HtmlFormatter()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_log_handler( handler, level=None, fmt=None, datefmt=None, propagate=None): """ Add a logging handler to Orca. Parameters handler : logging.Handler subclass level : int, optional An optional logging level that will apply only to this stream handler. fmt : str, optional An optional format string that will be used for the log messages. datefmt : str, optional An optional format string for formatting dates in the log messages. propagate : bool, optional Whether the Orca logger should propagate. If None the propagation will not be modified, otherwise it will be set to this value. """
def _add_log_handler(
        handler, level=None, fmt=None, datefmt=None, propagate=None):
    """
    Add a logging handler to Orca.

    Parameters
    ----------
    handler : logging.Handler subclass
    level : int, optional
        Logging level applied only to this handler.
    fmt : str, optional
        Format string for log messages; defaults to US_LOG_FMT.
    datefmt : str, optional
        Format string for dates; defaults to US_LOG_DATE_FMT.
    propagate : bool, optional
        If not None, sets the Orca logger's propagation flag.
    """
    formatter = logging.Formatter(
        fmt=fmt or US_LOG_FMT, datefmt=datefmt or US_LOG_DATE_FMT)
    handler.setFormatter(formatter)
    if level is not None:
        handler.setLevel(level)
    orca_logger = logging.getLogger('orca')
    orca_logger.addHandler(handler)
    # Leave propagation untouched unless the caller asked for a change.
    if propagate is not None:
        orca_logger.propagate = propagate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log_to_stream(level=None, fmt=None, datefmt=None): """ Send log messages to the console. Parameters level : int, optional An optional logging level that will apply only to this stream handler. fmt : str, optional An optional format string that will be used for the log messages. datefmt : str, optional An optional format string for formatting dates in the log messages. """
def log_to_stream(level=None, fmt=None, datefmt=None):
    """
    Send log messages to the console.

    Parameters
    ----------
    level : int, optional
        Logging level applied only to this stream handler.
    fmt : str, optional
        Format string for the log messages.
    datefmt : str, optional
        Format string for dates in the log messages.
    """
    # Bug fix: `level` was previously accepted but never forwarded to
    # _add_log_handler, so per-handler levels were silently ignored.
    _add_log_handler(
        logging.StreamHandler(), level=level, fmt=fmt, datefmt=datefmt,
        propagate=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_all(): """ Clear any and all stored state from Orca. """
def clear_all():
    """
    Clear any and all stored state from Orca.
    """
    # Wipe every registry and cache dict in one pass.
    for registry in (_TABLES, _COLUMNS, _STEPS, _BROADCASTS, _INJECTABLES,
                     _TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE):
        registry.clear()
    # Memoized wrappers hold their own caches; clear each before dropping.
    for memoized in _MEMOIZED.values():
        memoized.value.clear_cached()
    _MEMOIZED.clear()
    logger.debug('pipeline state cleared')