text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateFile(cls, file_, url):
"""Check and update file compares with remote_url Args: file_: str. Local filename. Normally it's __file__ Returns: bool: file updated or not """ |
def compare(s1, s2):
return s1 == s2, len(s2) - len(s1)
if not url or not file_:
return False
try:
req = urllib.request.urlopen(url)
raw_codes = req.read()
with open(file_, 'rb') as f:
current_codes = f.read().replace(b'\r', b'')
is_same, diff = compare(current_codes, raw_codes)
if is_same:
cit.info("{} is already up-to-date.".format(file_))
return False
else:
cit.ask("A new version is available. Update? (Diff: {})".format(diff))
if cit.get_choice(['Yes', 'No']) == 'Yes':
with open(file_, 'wb') as f:
f.write(raw_codes)
cit.info("Update Success.")
return True
else:
cit.warn("Update Canceled")
return False
except Exception as e:
cit.err("{f} update failed: {e}".format(f=file_, e=e))
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ajax(cls, url, param={}, method='get'):
"""Get info by ajax Args: url: string Returns: dict: json decoded into a dict """ |
param = urllib.parse.urlencode(param)
if method.lower() == 'get':
req = urllib.request.Request(url + '?' + param)
elif method.lower() == 'post':
param = param.encode('utf-8')
req = urllib.request.Request(url, data=param)
else:
raise Exception("invalid method '{}' (GET/POST)".format(method))
rsp = urllib.request.urlopen(req)
if rsp:
rsp_json = rsp.read().decode('utf-8')
rsp_dict = json.loads(rsp_json)
return rsp_dict
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dir_meta(fp, atts):
"""Pop path information and map to supplied atts """ |
# Attibutes are popped from deepest directory first
atts.reverse()
dirname = os.path.split(fp)[0]
meta = dirname.split('/')
res = {}
try:
for key in atts:
res[key] = meta.pop()
except IndexError:
raise PathError(dirname)
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trionyx(request):
"""Add trionyx context data""" |
return {
'TX_APP_NAME': settings.TX_APP_NAME,
'TX_LOGO_NAME_START': settings.TX_LOGO_NAME_START,
'TX_LOGO_NAME_END': settings.TX_LOGO_NAME_END,
'TX_LOGO_NAME_SMALL_START': settings.TX_LOGO_NAME_SMALL_START,
'TX_LOGO_NAME_SMALL_END': settings.TX_LOGO_NAME_SMALL_END,
'trionyx_menu_items': app_menu.get_menu_items(),
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def register(self, pattern, view=None):
'''Allow decorator-style construction of URL pattern lists.'''
if view is None:
return partial(self.register, pattern)
self.patterns.append(self._make_url((pattern, view)))
return view |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fields(cls, inlcude_base=False, include_id=False):
"""Get model fields""" |
for field in cls._meta.fields:
if field.name == 'deleted':
continue
if not include_id and field.name == 'id':
continue
if not inlcude_base and field.name in ['created_at', 'updated_at']:
continue
yield field |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_absolute_url(self):
"""Get model url""" |
return reverse('trionyx:model-view', kwargs={
'app': self._meta.app_label,
'model': self._meta.model_name,
'pk': self.id
}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
    """Parse a Google Earth Engine asset into a STAC object.

    Returns:
        Item: STAC feature, when the asset type matches TOKEN_TYPE[0][1].
        Collection: STAC collection, when it matches TOKEN_TYPE[1][1].

    Raises:
        TypeError: if the asset type matches neither token.
        ValidationError: re-raised from STAC object construction.
    """ |
    # TOKEN_TYPE[0][1] presumably identifies a single Image asset — confirm.
    if self.type == TOKEN_TYPE[0][1]:
        try:
            # NOTE(review): _link/_properties are each called repeatedly
            # with identical arguments; hoisting them into locals would
            # be cheaper if they are side-effect free — confirm first.
            return Item(
                item_id=self._link(None, None)[1],
                links=self._link(None, None)[0],
                assets=self._asset(None),
                properties=self._properties(None)[0],
                geometry=self._properties(None)[2]
            )
        except ValidationError as e:
            raise
    elif self.type == TOKEN_TYPE[1][1]:
        try:
            # parallelize item computation
            items = [self._features_iterator(
                feature['id'],
                self._link(feature, data.ASSET_TYPE_IMAGE_COLL)[0],
                self._asset(
                    feature['properties']['system:index']
                ),
                self._properties(feature)[0],
                self._properties(feature)[2]
            ) for feature in self._get_full_info()['features']]
            # dask.compute resolves the per-feature items in parallel.
            res_list = dask.compute(items)[0]
            return Collection(
                collection_id=self._get_info()['id'],
                features=res_list
            )
        except ValidationError as e:
            raise
    else:
        raise TypeError("Unrecognized Stac type found.") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Genra(request):
""" Generate dict of Dept and its grade. """ |
school = request.GET['school']
c = Course(school=school)
return JsonResponse(c.getGenra(), safe=False) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Remove(self,directory,filename):
"""Deletes post from wordpress""" |
db = self._loadDB(directory)
logger.debug("wp: Attempting to remove %s from wp"%(filename))
# See if this already exists in our DB
if db.has_key(filename):
pid=db[filename]
logger.debug('wp: Found %s in DB with post id %s'%(filename,pid))
else:
print("wp: %s not in our local DB file [%s]"\
%(filename,self.DB_FILE))
return False
self._connectToWP()
self.wp.call(DeletePost(pid))
del db[filename]
self._saveDB(directory,db)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eq(self, event_property, value):
"""An equals filter chain. request(elapsed_ms).eq(path, "/") """ |
c = self.copy()
c.filters.append(filters.EQ(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ne(self, event_property, value):
"""A not-equal filter chain. request(elapsed_ms).ne(path, "/") """ |
c = self.copy()
c.filters.append(filters.NE(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lt(self, event_property, value):
"""A less-than filter chain. request(elapsed_ms).lt(elapsed_ms, 500) """ |
c = self.copy()
c.filters.append(filters.LT(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def le(self, event_property, value):
"""A less-than-or-equal-to filter chain. request(elapsed_ms).le(elapsed_ms, 500) """ |
c = self.copy()
c.filters.append(filters.LE(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gt(self, event_property, value):
"""A greater-than filter chain. request(elapsed_ms).gt(elapsed_ms, 500) """ |
c = self.copy()
c.filters.append(filters.GT(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ge(self, event_property, value):
"""A greater-than-or-equal-to filter chain. request(elapsed_ms).ge(elapsed_ms, 500) """ |
c = self.copy()
c.filters.append(filters.GE(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def re(self, event_property, value):
"""A regular expression filter chain. request(elapsed_ms).re(path, "[^A-Za-z0-9+]") """ |
c = self.copy()
c.filters.append(filters.RE(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def startswith(self, event_property, value):
"""A starts-with filter chain. request(elapsed_ms).re(path, "^/cube") """ |
c = self.copy()
c.filters.append(filters.RE(event_property, "^{value}".format(
value=value)))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_array(self, event_property, value):
"""An in-array filter chain. request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"]) request(elapsed_ms).in(path, ["/event", "/"]) """ |
c = self.copy()
c.filters.append(filters.IN(event_property, value))
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_multiple_data():
"""Get data from all the platforms listed in makerlabs.""" |
# Get data from all the mapped platforms
all_labs = {}
all_labs["diybio_org"] = diybio_org.get_labs(format="dict")
all_labs["fablabs_io"] = fablabs_io.get_labs(format="dict")
all_labs["makeinitaly_foundation"] = makeinitaly_foundation.get_labs(
format="dict")
all_labs["hackaday_io"] = hackaday_io.get_labs(format="dict")
all_labs["hackerspaces_org"] = hackerspaces_org.get_labs(format="dict")
all_labs["makery_info"] = makery_info.get_labs(format="dict")
all_labs["nesta"] = nesta.get_labs(format="dict")
# all_labs["techshop_ws"] = techshop_ws.get_labs(format="dict")
return all_labs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_timeline(source):
"""Rebuild a timeline of the history of makerlabs.""" |
# Set up the pandas timeseries dataframe
timeline_format = ["name", "type", "source", "country", "city", "latitude",
"longitude", "website_url", "twitter_url",
"facebook_page_url", "facebook_group_url",
"whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end",
"facebook_start", "facebook_end"]
timeline = pd.DataFrame(timeline_format)
# Getdata from all the mapped platforms
if source.lower() == "diybio.org":
data = diybio_org.get_labs(format="dict")
elif source.lower() == "fablabs_io":
data = fablabs_io.get_labs(format="dict")
elif source.lower() == "makeinitaly_foundation":
data = makeinitaly_foundation.get_labs(format="dict")
elif source.lower() == "hackaday_io":
data = hackaday_io.get_labs(format="dict")
elif source.lower() == "hackerspaces_org":
data = hackerspaces_org.get_labs(format="dict")
elif source.lower() == "makery_info":
data = makery_info.get_labs(format="dict")
elif source.lower() == "nesta":
data = nesta.get_labs(format="dict")
elif source.lower() == "all":
pass
# Fill the dataframe with basic details
for lab in labs_data:
for link in lab.links:
print link
if "twitter" in link:
print link
if "facebook" in link:
print link
lab_dataframe_dict = {"name": lab.name,
"type": lab.lab_type,
"source": lab.source,
"country": lab.country,
"city": lab.city,
"latitude": lab.latitude,
"longitude": lab.longitude,
"website_url": lab.url}
timeline.append(lab_dataframe_dict)
["name", "type", "source", "country", "city", "lat", "long",
"website_url", "twitter_url", "facebook_page_url",
"facebook_group_url", "whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end", "facebook_start",
"facebook_end"]
# Get time data from platforms, whenever possible
# Get domain data (whois)
# Get subdomain data (Internet Archive)
# Get social media data (Twitter)
# Get social media data (Facebook)
return timeline |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_genotypes(self):
"""This really just intializes the file by opening it up. """ |
if DataParser.compressed_pedigree:
self.genotype_file = gzip.open("%s.gz" % self.tped_file, 'rb')
else:
self.genotype_file = open(self.tped_file)
self.filter_missing() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_genotypes(self, data):
    """Parse one pedigree line into numeric genotypes.

    Removes excluded individuals and translates allele pairs into
    minor-allele counts (0, 1, 2); missing genotypes are stored as
    ``DataParser.missing_storage``.

    Parameters:
        data: list of str. TPED tokens; ``data[4:]`` are the alleles.

    Returns:
        tuple: (genotype_data, major_allele, minor_allele, hetero_count,
        maj_allele_count, min_allele_count, missing_count,
        effect_allele_count).

    Raises:
        TooManyAlleles: more than two distinct alleles seen.
        TooFewAlleles: only one distinct allele seen.
    """ |
    # Get a list of uniq entries in the data, except for missing.
    # NOTE(review): set(DataParser.missing_representation) iterates the
    # representation itself — correct only for a one-character token;
    # confirm.
    alleles = list(set(data[4:]) - set(DataParser.missing_representation))
    if len(alleles) > 2:
        raise TooManyAlleles(chr=self.chr, rsid=self.rsid, alleles=alleles)
    # We don't have a way to know this in advance, so we want to just
    # iterate onward if we encounter one of these
    if len(alleles) == 1:
        raise TooFewAlleles(chr=self.chr, rsid=self.rsid, alleles=alleles)
    # Strip out any excluded individuals
    allelic_data = numpy.ma.MaskedArray(numpy.array(data[4:], dtype="S2"), self.ind_mask).compressed().reshape(-1, 2)
    maj_allele_count = numpy.sum(allelic_data==alleles[0])
    min_allele_count = numpy.sum(allelic_data==alleles[1])
    # Effect allele is the *original* second allele, even if the swap
    # below later proves it to be the major one.
    effect_allele_count = min_allele_count
    # Swap so alleles[0] is always the major (most frequent) allele.
    if min_allele_count > maj_allele_count:
        alleles = [alleles[1], alleles[0]]
        allele_count = maj_allele_count
        maj_allele_count = min_allele_count
        min_allele_count = allele_count
    #genotypes = []
    major_allele = alleles[0]
    minor_allele = alleles[1]
    # Genotypes represent the sum of minor alleles at each sample
    genotype_data = numpy.sum(allelic_data==minor_allele, axis=1)
    # A missing first allele marks the whole genotype as missing.
    missing_alleles = allelic_data[:, 0]==DataParser.missing_representation
    genotype_data[missing_alleles] = DataParser.missing_storage
    hetero_count = numpy.sum(genotype_data==1)
    return (genotype_data,
            major_allele,
            minor_allele,
            hetero_count,
            maj_allele_count,
            min_allele_count,
            numpy.sum(missing_alleles),
            effect_allele_count) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_missing(self):
    """Filter out individuals and SNPs with too much missing data.

    First pass counts, per individual, the missing genotypes at loci
    inside the configured boundary and masks individuals exceeding
    ``DataParser.ind_miss_tol``.  Second pass counts missing calls per
    SNP over the remaining individuals and drops SNPs exceeding
    ``DataParser.snp_miss_tol``; survivors set ``self.locus_count``.
    """ |
    missing = None
    locus_count = 0
    # Filter out individuals according to missingness
    self.genotype_file.seek(0)
    for genotypes in self.genotype_file:
        genotypes = genotypes.split()
        chr, rsid, junk, pos = genotypes[0:4]
        # NOTE(review): chr/pos are passed as strings here but as ints in
        # the second pass — confirm TestBoundary accepts both.
        if DataParser.boundary.TestBoundary(chr, pos, rsid):
            locus_count += 1
            allelic_data = numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2)
            if missing is None:
                # One missing-counter slot per individual.
                missing = numpy.zeros(allelic_data.shape[0], dtype='int8')
            # Two missing alleles => one missing genotype (hence /2).
            # NOTE(review): '/' is integer division only on Python 2.
            missing += (numpy.sum(0+(allelic_data==DataParser.missing_representation), axis=1)/2)
    max_missing = DataParser.ind_miss_tol * locus_count
    dropped_individuals = 0+(max_missing<missing)
    # Mask both allele columns for every dropped individual.
    self.ind_mask[:,0] = self.ind_mask[:,0]|dropped_individuals
    self.ind_mask[:,1] = self.ind_mask[:,1]|dropped_individuals
    # Counts are in allele units (2 per individual), consistent with the
    # per-SNP missing counts below.
    valid_individuals = numpy.sum(self.ind_mask==0)
    max_missing = DataParser.snp_miss_tol * valid_individuals
    self.locus_count = 0
    # We can't merge these two iterations since we need to know which
    # individuals to consider for filtering on MAF
    dropped_snps = []
    self.genotype_file.seek(0)
    for genotypes in self.genotype_file:
        genotypes = genotypes.split()
        chr, rsid, junk, pos = genotypes[0:4]
        chr = int(chr)
        pos = int(pos)
        if DataParser.boundary.TestBoundary(chr, pos, rsid):
            allelic_data = numpy.ma.MaskedArray(numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2), self.ind_mask).compressed()
            missing = numpy.sum(0+(allelic_data==DataParser.missing_representation))
            if missing > max_missing:
                DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
                dropped_snps.append(rsid)
            else:
                self.locus_count += 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def populate_iteration(self, iteration):
    """Pour the current data into the iteration object.

    Reads one line from the genotype file, fills the iteration's locus
    fields and genotype data, and reports whether the SNP passes the
    boundary and MAF filters.

    Returns:
        bool (or None): True when the locus is inside the boundary and
        its MAF is within [min_maf, max_maf]; False when too few
        alleles were found; None (implicit) when the boundary test
        fails.
    """ |
    cur_idx = iteration.cur_idx
    # NOTE: file.next() and the bare print below are Python 2 only.
    genotypes = self.genotype_file.next().split()
    iteration.chr, iteration.rsid, junk, iteration.pos = genotypes[0:4]
    iteration.chr = int(iteration.chr)
    iteration.pos = int(iteration.pos)
    if DataParser.boundary.TestBoundary(iteration.chr, iteration.pos, iteration.rsid):
        try:
            # NOTE(review): TooManyAlleles from process_genotypes is not
            # caught here — confirm it is handled by the caller.
            [iteration.genotype_data,
             iteration.major_allele,
             iteration.minor_allele,
             iteration.hetero_count,
             iteration.maj_allele_count,
             iteration.min_allele_count,
             iteration.missing_allele_count,
             iteration.allele_count2] = self.process_genotypes(genotypes)
            return iteration.maf >= DataParser.min_maf and iteration.maf <= DataParser.max_maf
        except TooFewAlleles:
            # Monomorphic locus: report and skip.
            print "\n\n\nSkipping %s:%s %s %s" % (iteration.chr, iteration.pos, iteration.rsid, cur_idx)
            return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_labs(format):
"""Gets current UK Makerspaces data as listed by NESTA.""" |
ukmakerspaces_data = data_from_nesta()
ukmakerspaces = {}
# Iterate over csv rows
for index, row in ukmakerspaces_data.iterrows():
current_lab = UKMakerspace()
current_lab.address_1 = row["Address"].replace("\r", " ")
current_lab.address_2 = row["Region"].replace("\r", " ") + " - " + row["Area"].replace("\r", " ")
current_lab.city = ""
current_lab.county = ""
current_lab.email = row["Email address"]
current_lab.latitude = ""
current_lab.longitude = ""
current_lab.links = ""
current_lab.name = row["Name of makerspace"]
current_lab.phone = row["Phone number"]
current_lab.postal_code = row["Postcode"]
current_lab.url = row["Website / URL"]
# Add the lab, with a slug from the name
ukmakerspaces[current_lab.name] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in ukmakerspaces:
output[j] = ukmakerspaces[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in ukmakerspaces:
single = ukmakerspaces[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in ukmakerspaces:
output[j] = ukmakerspaces[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = ukmakerspaces
# Default: return an oject
else:
output = ukmakerspaces
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_self(self, cls, name):
"""Initialize this descriptor instance Parameters cls : class The class which owns this descriptor name : str The attribute name of this descriptor """ |
# the class the descriptor is defined on
self.this_class = cls
# the attribute name of this descriptor
self.this_name = name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_fields(self, **fields):
"""Add new data fields to this struct instance""" |
self.__class__ = type(self.__class__.__name__,
(self.__class__,), fields)
for k, v in fields.items():
v.init_inst(self) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def del_fields(self, *names):
"""Delete data fields from this struct instance""" |
cls = type(self)
self.__class__ = cls
for n in names:
# don't raise error if a field is absent
if isinstance(getattr(cls, n, None), DataField):
if n in self._field_values:
del self._field_values[n]
delattr(cls, n) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_field(self, name, value):
"""Forcibly sets field values without parsing""" |
f = getattr(self, name, None)
if isinstance(f, DataField):
f.set(self, value)
else:
raise FieldError("No field named '%s'" % name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_success_url(self):
    """Ensure the user-originating redirection URL is safe.

    Reads the redirect target from POST (falling back to GET) and
    returns it only when it passes ``is_safe_url``; otherwise falls
    back to the resolved ``LOGIN_REDIRECT_URL``.
    """ |
    redirect_to = self.request.POST.get(
        self.redirect_field_name,
        self.request.GET.get(self.redirect_field_name, '')
    )
    # NOTE(review): allowed_hosts / require_https are commented out, so
    # this check is weaker than Django's stock LoginView — confirm the
    # open-redirect exposure is acceptable.
    url_is_safe = is_safe_url(
        url=redirect_to,
        # allowed_hosts=self.get_success_url_allowed_hosts(),
        # require_https=self.request.is_secure(),
    )
    if not url_is_safe:
        return resolve_url(settings.LOGIN_REDIRECT_URL)
    return redirect_to |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def form_valid(self, form):
"""Security check complete. Log the user in.""" |
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _request(self, method, url, **kwargs):
''' Wrap `utils.requests.request` adding user and password. '''
self._ask_for_password()
return request(method, url, user=self._user, password=self._password,
**kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, weeks):
"""Create the user and ip profiles for the given weeks.""" |
user_pageviews = self.create_profiles('Pageviews', weeks)
user_downloads = self.create_profiles('Downloads', weeks)
self._export_profiles('Profiles', user_pageviews, user_downloads)
user_pageviews = self.create_profiles('Pageviews_IP', weeks, True)
user_downloads = self.create_profiles('Downloads_IP', weeks, True)
self._export_profiles('Profiles_IP', user_pageviews, user_downloads,
ip_user=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _export_profiles(self, profile_name, user_pageviews, user_downloads, ip_user=False):
    """Filter and export the user profiles.

    Keeps only users whose number of *unique* pageviews lies in
    [user_views_min, user_views_max), computes per-record weights and
    stores them under *profile_name*.

    Parameters:
        profile_name: str. Storage key for the exported profiles.
        user_pageviews: dict user -> list of viewed record ids.
        user_downloads: dict user -> list of downloaded record ids.
        ip_user: bool. When True, users get sequential synthetic ids.
    """ |
    views_min = self.config.get('user_views_min')
    views_max = self.config.get('user_views_max')
    # Id offsets keep IP-derived and real user ids in disjoint ranges.
    ip_user_id = 500000000000
    add_user_id = 100000000000
    stat_records = 0
    with self.storage.get_user_profiles(profile_name) as store:
        store.clear()
        for user in user_pageviews:
            # Only users with unique pageviews.
            unique_views = len(set(user_pageviews[user]))
            if views_max > unique_views >= views_min:
                # NOTE(review): download_list may be None for users with
                # no downloads — confirm the callee accepts that.
                nodes, weight = self._calculate_user_record_weights(
                    record_list=user_pageviews[user],
                    download_list=user_downloads.get(user))
                if ip_user:
                    store.add_user(ip_user_id, nodes, weight)
                    ip_user_id += 1
                else:
                    user = str(add_user_id + int(user))
                    store.add_user(user, nodes, weight)
                self.stat_long['User_num_records'].append(len(nodes))
                stat_records += len(nodes)
            elif unique_views >= views_min:
                # TODO: Add stat for too many views.
                print("Drop user {} with {} views".format(user,
                                                          unique_views))
    self.stat['user_profiles'] = len(self.stat_long.get(
        'User_num_records'))
    self.stat['user_profiles_records'] = stat_records
    print("Stats: {}".format(self.stat)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_profiles(self, prefix, weeks, ip_user=False):
    """Create the user profiles for the given weeks.

    Parameters:
        prefix: str. Storage prefix ('Pageviews', 'Downloads', ...).
        weeks: iterable of (year, week) pairs to load.
        ip_user: bool. Build synthetic IP + user-agent based user ids.

    Returns:
        defaultdict: user id -> list of accessed record ids.
    """ |
    # Future: Add a time range in weeks for how long a user is considered
    # as the same user.
    # First pass: count how often each record was accessed.
    record_counter = {}
    for year, week in weeks:
        # NOTE: 'file' shadows the Python 2 builtin; harmless here.
        file = self.storage.get(prefix, year, week)
        self.count_records(record_counter, file)
    # TODO: Statistics, count records
    print("Records read all: {}".format(self.stat))
    # Drop records with too few / too many views.
    records_valid = self.filter_counter(record_counter)
    # Second pass: build the per-user profiles from the kept records.
    profiles = defaultdict(list)
    for year, week in weeks:
        file = self.storage.get(prefix, year, week)
        self._create_user_profiles(profiles, file, records_valid, ip_user,
                                   year, week)
    return profiles |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_records(self, record_counter, file):
"""Count the number of viewed records.""" |
counter = record_counter
events_counter = 0
for record in file.get_records():
recid = record[2]
counter[recid] = counter.get(recid, 0) + 1
events_counter += 1
self.stat['user_record_events'] = events_counter
return counter |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_counter(self, counter, min=2, max=100000000):
""" Filter the counted records. Returns: List with record numbers. """ |
records_filterd = {}
counter_all_records = 0
for item in counter:
counter_all_records += 1
if max > counter[item] >= min:
records_filterd[item] = counter[item]
self.stat['user_record_events'] = counter_all_records
self.stat['records_filtered'] = len(records_filterd)
return records_filterd |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_user_profiles(self, profiles, file, valid_records, ip_user=False, year=None, week=None):
""" Create user profiles with all the records visited or downloaded. Returns: Dictionary with the user id and a record list. {'2323': [1, 2, 4]} """ |
for record in file.get_records():
recid = record[2]
if not valid_records.get(recid, None):
# Record not valid
continue
if ip_user:
ip = record[4]
user_agent = record[5]
# Generate unique user id
user_id = "{0}-{1}_{2}_{3}".format(year, week, ip, user_agent)
try:
uid = hashlib.md5(user_id.encode('utf-8')).hexdigest()
except UnicodeDecodeError:
logger.info("UnicodeDecodeError {}".format(user_id))
else:
uid = record[1]
profiles[uid].append(recid)
return profiles |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def handle_dims(opts):
'''
Script option handling.
'''
use,res = [],[];
if opts['--X']:
use.append('x');
res.append(int(opts['--xres']));
if opts['--Y']:
use.append('y');
res.append(int(opts['--yres']));
if opts['--Z']:
use.append('z');
res.append(int(opts['--zres']));
if use == []:
use = ['x','y','z'];
res = map(lambda k: int(opts[k]),['--xres','--yres','--zres']);
# A couple of things to note; written in this way, whatever
# this list (and thus, what is read) becomes, it is ordered
# alphabetically. This is important, as this determines what
# each resulting row and column and breadth in the output
# array corresponds to from the actual simulation.
#
# It is probably worth mentioning that the xz in simulation
# axes will be [0,1] in numpy axes, that is, it will be left-handed.
# Using xz leads to this anyway, but it's worth reminding the reader.
# To permute in 2D, use the --permute flag.
return use,res; |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _temporary_filenames(total):
    """Context manager to create temporary files and remove them after use.

    NOTE(review): callers use this via ``with``, so a
    ``@contextlib.contextmanager`` decorator is presumably applied where
    this generator is defined — confirm it was not lost.
    """ |
    temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
    yield temp_files
    # Cleanup runs after the with-block body finishes.
    for temp_file in temp_files:
        try:
            os.remove(temp_file)
        except OSError:
            # Continue in case we could not remove the file. One reason is
            # that the file was never created.
            pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process(compressor, input_filename, output_filename):
"""Helper function to compress an image. Returns: _CompressorResult named tuple, with the resulting size, the name of the output file and the name of the compressor. """ |
compressor(input_filename, output_filename)
result_size = os.path.getsize(output_filename)
return _CompressorResult(result_size, output_filename, compressor.__name__) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compress_with(input_filename, output_filename, compressors):
    """Helper function to compress an image with several compressors.

    Runs every compressor into its own temp file and keeps the smallest
    result.  When no compressor shrank the file, or the best result is
    not equivalent to the source image, the output is a plain copy of
    the input instead.
    """ |
    with _temporary_filenames(len(compressors)) as temp_filenames:
        results = []
        for compressor, temp_filename in zip(compressors, temp_filenames):
            results.append(_process(compressor, input_filename, temp_filename))
        # _CompressorResult tuples order by size first, so min() picks
        # the smallest output.
        best_result = min(results)
        # Promote the winner to the final path before validating it.
        os.rename(best_result.filename, output_filename)
        best_compressor = best_result.compressor
        if best_result.size >= os.path.getsize(input_filename):
            # No compressor actually shrank the file.
            best_compressor = None
        if (best_compressor is not None and
                not _images_are_equal(input_filename, output_filename)):
            logging.info('Compressor "%s" generated an invalid image for "%s"',
                         best_compressor, input_filename)
            best_compressor = None
        if best_compressor is None:
            # Fall back to an untouched copy of the original.
            shutil.copy(input_filename, output_filename)
        logging.info('%s: best compressor for "%s"', best_compressor,
                     input_filename) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_to_map(map_obj, lat, lon, date_time, key, cluster_obj):
"""Add individual elements to a foilum map in a cluster object""" |
text = "Event {0} at {1}".format(key, date_time.split()[1])
folium.Marker([lat, lon], popup=text).add_to(cluster_obj) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def sign(hash,priv,k=0):
'''
Returns a DER-encoded signature from a input of a hash and private
key, and optionally a K value.
Hash and private key inputs must be 64-char hex strings,
k input is an int/long.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
>>> k = 4 # chosen by fair dice roll, guaranteed to be random
>>> sign(h,p,k)
'3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
'''
if k == 0:
k = generate_k(priv, hash)
hash = int(hash,16)
priv = int(priv,16)
r = int(privtopub(dechex(k,32),True)[2:],16) % N
s = ((hash + (r*priv)) * modinv(k,N)) % N
# High S value is non-standard (soon to be invalid)
if s > (N / 2):
s = N - s
r, s = inttoDER(r), inttoDER(s)
olen = dechex(len(r+s)//2,1)
return '30' + olen + r + s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def verify(hash,sig,pub,exceptonhighS=False):
'''
Verify a DER-encoded signature against a given hash and public key
No checking of format is done in this function, so the signature
format (and other inputs) should be verified as being the correct
format prior to using this method.
Hash is just 64-char hex string
Public key format can be verified with validatepubkey() which is
found in .bitcoin
Signature format can be validated with checksigformat() which is
the next function after this
'exceptonhighS' is available because many Bitcoin implementations
will soon be invalidating high S values in signatures, in order
to reduce transaction malleability issues. I decided an exception
was preferable to returning False, so as to be distinct from a bad
signature.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> sig = '3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
>>> pub = '022587327dabe23ee608d8504d8bc3a341397db1c577370389f94ccd96bb59a077'
>>> verify(h,sig,pub)
True
>>> sig = '3046022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13022100a671c81d199d8810b2f350f1cd2f6a1fff7268a495f813682b18ea0e7bafde8a'
>>> verify(h,sig,pub)
True
>>> verify(h,sig,uncompress(pub))
True
>>> verify(h,sig,pub,True)
Traceback (most recent call last):
...
TypeError: High S value.
'''
rlen = 2*int(sig[6:8],16)
r = int(sig[8:8+(rlen)],16)
s = int(sig[(12+rlen):],16) # Ignoring s-len; format dictates it
# will be to the end of string
assert r < N
if exceptonhighS:
if s > (N / 2):
raise TypeError("High S value.")
w = modinv(s,N)
x = int(addpubs(
privtopub(dechex((int(hash,16) * w) % N,32),False),
multiplypub(pub,dechex((r*w) % N,32),False),
False)[2:66],16)
return x==r |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def checksigformat(a,invalidatehighS=False):
'''
Checks input to see if it's a correctly formatted DER Bitcoin
signature in hex string format.
Returns True/False. If it excepts, there's a different problem
unrelated to the signature...
This does NOT valid the signature in any way, it ONLY checks that
it is formatted properly.
If invalidatehighS is True, this function will return False on an
otherwise valid signature format if it has a high S value.
'''
try:
a = hexstrlify(unhexlify(a))
except:
return False
try:
rlen = 2*int(a[6:8],16)
slen = 2*int(a[(10+rlen):(12+rlen)],16)
r = a[8:8+(rlen)]
s1 = a[(12+rlen):]
s2 = a[(12+rlen):(12+rlen+slen)]
assert s1 == s2
s1 = int(s1,16)
assert s1 < N
assert a[:2] == '30'
assert len(a) == ((2*int(a[2:4],16)) + 4)
assert a[4:6] == '02'
assert a[(8+rlen):(10+rlen)] == '02'
if int(dechex(int(r,16))[:2],16) > 127:
assert r[:2] == '00'
assert r[2:4] != '00'
else:
assert r[:2] != '00'
if int(dechex(s1)[:2],16) > 127:
assert s2[:2] == '00'
assert s2[2:4] != '00'
else:
assert s2[:2] != '00'
assert len(r) < 67
assert len(s2) < 67
except AssertionError:
return False
except Exception as e:
raise Exception(str(e))
if invalidatehighS:
if s1 > (N / 2):
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def param_converter(*decorator_args, **decorator_kwargs):
""" Call with the url parameter names as keyword argument keys, their values being the model to convert to. Models will be looked up by the url param names. If a url param name is prefixed with the snake-cased model name, the prefix will be stripped. If a model isn't found, abort with a 404. The action's argument names must match the snake-cased model names. For example:: @bp.route('/users/<int:user_id>/posts/<int:id>') @param_converter(user_id=User, id=Post) def show_post(user, post):
# the param converter does the database lookups: # user = User.query.filter_by(id=user_id).first() # post = Post.query.filter_by(id=id).first() # and calls the decorated action: show_post(user, post) # or to customize the argument names passed to the action: @bp.route('/users/<int:user_id>/posts/<int:post_id>') @param_converter(user_id={'user_arg_name': User}, post_id={'post_arg_name': Post}) def show_post(user_arg_name, post_arg_name):
Also supports parsing arguments from the query string. For query string keyword arguments, use a lookup (dict, Enum) or callable:: @bp.route('/users/<int:id>') @param_converter(id=User, foo=str, optional=int) def show_user(user, foo, optional=10):
# GET /users/1?foo=bar # calls show_user(user=User.get(1), foo='bar') """ |
def wrapped(fn):
@wraps(fn)
def decorated(*view_args, **view_kwargs):
view_kwargs = _convert_models(view_kwargs, decorator_kwargs)
view_kwargs = _convert_query_params(view_kwargs, decorator_kwargs)
return fn(*view_args, **view_kwargs)
return decorated
if decorator_args and callable(decorator_args[0]):
return wrapped(decorator_args[0])
return wrapped |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(name, filepath, separator="---"):
"""Load given file into knowledge base. Simply load data into an existing knowledge base: .. code-block:: console $ inveniomanage knowledge load mykb /path/to/file.kb The file is expected to have a mapping with values: ``foo<seperator>bar`` (per line). ``<separator>`` is by default set to **---**, but can be overridden with ``-s someseperator`` or ``--sep someseperator``. """ |
current_app.logger.info(
">>> Going to load knowledge base {0} into '{1}'...".format(
filepath, name
)
)
if not os.path.isfile(filepath):
current_app.logger.error(
"Path to non-existing file\n",
file=sys.stderr
)
sys.exit(1)
try:
get_kb_by_name(name)
except NoResultFound:
current_app.logger.error(
"KB does not exist\n",
file=sys.stderr
)
sys.exit(1)
num_added = load_kb_mappings_file(name, filepath, separator)
current_app.logger.info(
">>> Knowledge '{0}' updated successfully with {1} entries.".format(
name, num_added
)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Run manager.""" |
from invenio_base.factory import create_app
app = create_app()
manager.app = app
manager.run() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command(func):
"""Decorator for CLI exposed functions""" |
func.parser = SUB_PARSER.add_parser(func.__name__, help=func.__doc__)
func.parser.set_defaults(func=func)
return func |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serv(args):
"""Serve a rueckenwind application""" |
if not args.no_debug:
tornado.autoreload.start()
extra = []
if sys.stdout.isatty():
# set terminal title
sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
if args.cfg:
extra.append(os.path.abspath(args.cfg))
listen = (int(args.port), args.address)
ioloop = tornado.ioloop.IOLoop.instance()
setup_app(app=args.MODULE, extra_configs=extra,
ioloop=ioloop, listen=listen)
ioloop.start() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Entry point of rw cli""" |
# check logging
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.basicConfig(level=getattr(logging, log_level),
format='%(asctime)s %(name)s[%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_path = os.path.abspath('.')
if current_path not in sys.path:
sys.path.insert(0, current_path)
argcomplete.autocomplete(ARG_PARSER)
args = ARG_PARSER.parse_args()
args.func(args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _checker(keywords):
"""Generate a checker which tests a given value not starts with keywords.""" |
def _(v):
"""Check a given value matches to keywords."""
for k in keywords:
if k in v:
return False
return True
return _ |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_doc(doc):
"""Parse a docstring. Parse a docstring and extract three components; headline, description, and map of arguments to help texts. Args: doc: docstring. Returns: a dictionary. """ |
    lines = doc.split("\n")
    # Leading lines up to the first keyword section form the description.
    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
    if len(descriptions) < 3:
        description = lines[0]
    else:
        # Headline, blank line, then the dedented long description.
        description = "{0}\n\n{1}".format(
            lines[0], textwrap.dedent("\n".join(descriptions[2:])))
    # Lines between the "Args:" header and the next section header.
    args = list(itertools.takewhile(
        _checker(_KEYWORDS_OTHERS),
        itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))
    argmap = {}
    if len(args) > 1:
        for pair in args[1:]:
            # "name: help text" -- re-join in case the help text itself
            # contained colons.
            kv = [v.strip() for v in pair.split(":")]
            if len(kv) >= 2:
                argmap[kv[0]] = ":".join(kv[1:])
    return dict(headline=descriptions[0], description=description, args=argmap)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_parser(self, func=None, name=None, **kwargs):
"""Add parser. This method makes a new sub command parser. It takes same arguments as add_parser() of the action class made by argparse.ArgumentParser.add_subparsers. In addition to, it takes one positional argument `func`, which is the function implements process of this sub command. The `func` will be used to determine the name, help, and description of this sub command. The function `func` will also be set as a default value of `cmd` attribute. If you want to choose name of this sub command, use keyword argument `name`. Args: func: function implements the process of this command. name: name of this command. If not give, the function name is used. Returns: new ArgumentParser object. Raises: ValueError: if the given function does not have docstrings. """ |
    if func:
        if not func.__doc__:
            raise ValueError(
                "No docstrings given in {0}".format(func.__name__))
        info = _parse_doc(func.__doc__)
        # Fill help / description / formatter defaults from the parsed
        # docstring unless the caller supplied them explicitly.
        if _HELP not in kwargs or not kwargs[_HELP]:
            kwargs[_HELP] = info["headline"]
        if _DESCRIPTION not in kwargs or not kwargs[_DESCRIPTION]:
            kwargs[_DESCRIPTION] = info["description"]
        if _FORMAT_CLASS not in kwargs or not kwargs[_FORMAT_CLASS]:
            kwargs[_FORMAT_CLASS] = argparse.RawTextHelpFormatter
        if not name:
            # Fall back to the function's name for this sub command.
            name = func.__name__ if hasattr(func, "__name__") else func
        res = self.__delegate.add_parser(name, argmap=info["args"], **kwargs)
        # Route parsed args back to the implementing function.
        res.set_defaults(cmd=func)
    else:
        res = self.__delegate.add_parser(name, **kwargs)
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_argument(self, *args, **kwargs):
"""Add an argument. This method adds a new argument to the current parser. The function is same as ``argparse.ArgumentParser.add_argument``. However, this method tries to determine help messages for the adding argument from some docstrings. If the new arguments belong to some sub commands, the docstring of a function implements behavior of the sub command has ``Args:`` section, and defines same name variable, this function sets such definition to the help message. Positional Args: same positional arguments as argparse.ArgumentParser.add_argument. Keyword Args: same keywards arguments as argparse.ArgumentParser.add_argument. """ |
if _HELP not in kwargs:
for name in args:
name = name.replace("-", "")
if name in self.__argmap:
kwargs[_HELP] = self.__argmap[name]
break
return super(ArgumentParser, self).add_argument(*args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __user_location(__pkg: str, type_) -> str: """Utility function to look up XDG basedir locations Args: __pkg: Package name type_: Location type """ |
if ALLOW_DARWIN and sys.platform == 'darwin':
user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])
else:
user_dir = getenv('XDG_{}_HOME'.format(type_.upper()),
path.sep.join([getenv('HOME', ''),
__LOCATIONS[type_][1]]))
return path.expanduser(path.sep.join([user_dir, __pkg])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_configs(__pkg: str, __name: str = 'config') -> List[str]: """Return all configs for given package. Args: __pkg: Package name __name: Configuration file name """ |
dirs = [user_config(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))
configs = []
for dname in reversed(dirs):
test_path = path.join(dname, __name)
if path.exists(test_path):
configs.append(test_path)
return configs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data(__pkg: str, __name: str) -> str: """Return top-most data file for given package. Args: __pkg: Package name __name: Data file name """ |
for dname in get_data_dirs(__pkg):
test_path = path.join(dname, __name)
if path.exists(test_path):
return test_path
raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_dirs(__pkg: str) -> List[str]: """Return all data directories for given package. Args: __pkg: Package name """ |
dirs = [user_data(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_DATA_DIRS',
'/usr/local/share/:/usr/share/').split(':'))
return [d for d in dirs if path.isdir(d)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def replay_messages(filepath, process_message, *args, **kwargs):
''' Take pulse messages from a file and process each with process_message.
:param filepath: File containing dumped pulse messages
:type filepath: str
:param process_message: Function to process each pulse message with
:type process_message: function
:param *args: Arguments to be passed to process_message()
:type *args: tuple
:param **kwargs: Keyword argument to be passed to process_message()
:type **kwargs: dict
:returns: Nothing
:rtype: None
'''
message = Mock()
file_contents = _read_file(filepath)
for line in file_contents.splitlines():
# Using ast.literal_eval to turn pulse message strings into dicts
process_message(ast.literal_eval(line), message, *args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_path(path, filetype=None, has_filetype=True):
""" Convert dot-separated paths to directory paths Allows non-python files to be placed in the PYTHONPATH and be referenced using dot-notation instead of absolute or relative file-system paths. If a text file, named test.txt was placed in a python repo named myprog in the module named util, then: normalize_path('myprog.util.test.txt') would return the file-system's path to the file 'test.txt'. Parameters: path - path to convert filetype - don't include as part of path if present as last token has_filetype - if True, treat last dot-delimited token as filetype Notes: 1. Paths are relative to PYTHONPATH. 2. If the specified path is not a string, it is returned without change. 3. If the specified path contains os-specific path separator characters, the path is returned without change. 4. If has_filetype is True, filetype does not have to be specified. If filetype is specified, has_filetype is ignored, and filetype must match the last dot-delimited token exactly. """ |
    if not isinstance(path, str):
        # Non-string paths are passed through untouched.
        return path
    if '.' in path and os.path.sep not in path:  # path is dot separated
        parts = path.split('.')
        extension = ''
        if len(parts) > 1:
            if filetype and has_filetype:
                has_filetype = False  # filetype is more specific
            # Treat the last token as a file extension when requested.
            if (filetype and parts[-1] == filetype) or has_filetype:
                extension = '.' + parts[-1]
                parts = parts[:-1]
        if len(parts) > 1:
            # Resolve the top-level package to its file-system location,
            # then append the remaining tokens as sub-directories.
            if PY3:
                spec = importlib.util.find_spec(parts[0])
                path = list(spec.submodule_search_locations)[0]
            else:
                _, path, _ = imp.find_module(parts[0])
            path = os.path.join(path, *parts[1:]) + extension
    return path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
""" Executes a list of functions and returns the first non none result. All kwargs will be passed as kwargs to each individual function. If all functions return None, None is the overall result. Examples: 3 None 2 """ |
    # Validate that eval_list is a real (non-string) iterable before use.
    Validator.is_real_iterable(raise_ex=True, eval_list=eval_list)
    # Return the first non-None result; later functions are not called.
    for eval_fun in eval_list:
        res = eval_fun(**kwargs)
        if res is not None:
            return res
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_person_new(self, people):
""" New people joined the audience :param people: People that just joined the audience :type people: list[paps.person.Person] :rtype: None """ |
self.debug("()")
changed = []
with self._people_lock:
for p in people:
person = Person.from_person(p)
if person.id in self._people:
self.warning(
u"{} already in audience".format(person.id)
)
self._people[person.id] = person
changed.append(person)
for plugin in self.plugins:
try:
plugin.on_person_new(changed)
except:
self.exception(
u"Failed to send new people to {}".format(plugin.name)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_col_name(self, col_name, used_column_names, is_relation):
""" Modify the column name to make it Python-compatible as a field name """ |
    field_params = {}
    field_notes = []
    new_name = col_name.lower()
    if new_name != col_name:
        field_notes.append('Field name made lowercase.')
    if is_relation:
        if new_name.endswith('_id'):
            # Django appends '_id' to foreign-key columns itself.
            new_name = new_name[:-3]
        else:
            field_params['db_column'] = col_name
    # Replace every non-word character with an underscore.
    new_name, num_repl = re.subn(r'\W', '_', new_name)
    if num_repl > 0:
        field_notes.append('Field renamed to remove unsuitable characters.')
    if new_name.find('__') >= 0:
        # Double underscores clash with Django's field-lookup syntax.
        while new_name.find('__') >= 0:
            new_name = new_name.replace('__', '_')
        if col_name.lower().find('__') >= 0:
            # Only add the comment if the double underscore was in the original name
            field_notes.append("Field renamed because it contained more than one '_' in a row.")
    if new_name.startswith('_'):
        new_name = 'field%s' % new_name
        field_notes.append("Field renamed because it started with '_'.")
    if new_name.endswith('_'):
        new_name = '%sfield' % new_name
        field_notes.append("Field renamed because it ended with '_'.")
    if keyword.iskeyword(new_name):
        new_name += '_field'
        field_notes.append('Field renamed because it was a Python reserved word.')
    if new_name[0].isdigit():
        new_name = 'number_%s' % new_name
        field_notes.append("Field renamed because it wasn't a valid Python identifier.")
    if new_name in used_column_names:
        # Append a numeric suffix until the name is unique.
        num = 0
        while '%s_%d' % (new_name, num) in used_column_names:
            num += 1
        new_name = '%s_%d' % (new_name, num)
        field_notes.append('Field renamed because of name conflict.')
    if col_name != new_name and field_notes:
        # Preserve the original column name in the generated model.
        field_params['db_column'] = col_name
    return new_name, field_params, field_notes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_type(self, connection, table_name, row):
""" Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. """ |
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def MonSQL(host=None, port=None, username=None, password=None, dbname=None, dbpath=None, dbtype=None):
""" Initialize and return a Database instance """ |
if dbtype is None:
raise MonSQLException('Database type must be specified')
if dbtype == DB_TYPES.MYSQL:
return MySQLDatabase(host, port, username, password, dbname)
elif dbtype == DB_TYPES.SQLITE3:
return SQLite3Database(dbpath)
elif dbtype == DB_TYPES.POSTGRESQL:
return PostgreSQLDatabase(host, port, username, password, dbname)
else:
raise MonSQLException('Database type %s not supported' %dbtype) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_password(self, raw_password):
""" Returns a boolean of whether the raw_password was correct. Handles hashing formats behind the scenes. """ |
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=[self.PASSWORD_FIELD])
return check_password(raw_password, getattr(self, self.PASSWORD_FIELD), setter) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_context_data(self, **kwargs):
""" Insert the form into the context dict. """ |
for key in self.get_form_class_keys():
kwargs['{}_form'.format(key)] = self.get_form(key)
return super(FormMixin, self).get_context_data(**kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def form_valid(self, forms):
""" If the form is valid, save the associated model. """ |
for key, form in forms.items():
setattr(self, '{}_object'.format(key), form.save())
return super(MultipleModelFormMixin, self).form_valid(forms) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def view_on_site(self, request, content_type_id, object_id):
""" Redirect to an object's page based on a content-type ID and an object ID. """ |
    # Look up the object, making sure it's got a get_absolute_url() function.
    try:
        content_type = ContentType.objects.get(pk=content_type_id)
        if not content_type.model_class():
            # Stale content type whose model class no longer exists.
            raise Http404(_("Content type %(ct_id)s object has no associated model") % {
                'ct_id': content_type_id,
            })
        obj = content_type.get_object_for_this_type(pk=object_id)
    except (ObjectDoesNotExist, ValueError):
        # Covers both a missing content type / object and a malformed id.
        raise Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") % {
            'ct_id': content_type_id,
            'obj_id': object_id,
        })
    try:
        get_absolute_url = obj.get_absolute_url
    except AttributeError:
        raise Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") % {
            'ct_name': content_type.name,
        })
    absurl = get_absolute_url()
    return HttpResponseRedirect(absurl)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_graderoster(section, instructor, requestor):
""" Returns a restclients.GradeRoster for the passed Section model and instructor Person. """ |
    # The graderoster resource is addressed by its encoded section label.
    label = GradeRoster(section=section,
                        instructor=instructor).graderoster_label()
    url = "{}/{}".format(graderoster_url, encode_section_label(label))
    headers = {"Accept": "text/xhtml",
               "Connection": "keep-alive",
               "X-UW-Act-as": requestor.uwnetid}
    response = SWS_GradeRoster_DAO().getURL(url, headers)
    if response.status != 200:
        # SWS error responses carry the reason in a status_description node.
        root = etree.fromstring(response.data)
        msg = root.find(".//*[@class='status_description']").text.strip()
        raise DataFailureException(url, response.status, msg)
    return GradeRoster(data=etree.fromstring(response.data.strip()),
                       section=section,
                       instructor=instructor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_graderoster(graderoster, requestor):
""" Updates the graderoster resource for the passed restclients.GradeRoster model. A new restclients.GradeRoster is returned, representing the document returned from the update request. """ |
    # The graderoster resource is addressed by its encoded section label.
    label = graderoster.graderoster_label()
    url = "{}/{}".format(graderoster_url, encode_section_label(label))
    headers = {"Content-Type": "application/xhtml+xml",
               "Connection": "keep-alive",
               "X-UW-Act-as": requestor.uwnetid}
    body = graderoster.xhtml()
    response = SWS_GradeRoster_DAO().putURL(url, headers, body)
    if response.status != 200:
        # SWS error responses carry the reason in a status_description node.
        root = etree.fromstring(response.data)
        msg = root.find(".//*[@class='status_description']").text.strip()
        raise DataFailureException(url, response.status, msg)
    return GradeRoster(data=etree.fromstring(response.data.strip()),
                       section=graderoster.section,
                       instructor=graderoster.instructor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def netdevs():
''' RX and TX bytes for each of the network devices '''
with open('/proc/net/dev') as f:
net_dump = f.readlines()
device_data={}
data = namedtuple('data',['rx','tx'])
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0),
float(line[1].split()[8])/(1024.0*1024.0))
return device_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_model(self, model, model_id):
"""Get a single model from the server. Args: model (string):
The class as a string. model_id (string):
The integer ID as a string. Returns: :class:`cinder_data.model.CinderModel`: A instance of the model. """ |
    # Delegate to the backing store; ids arrive as strings from the URL.
    return self._store.find_record(self._get_model_class(model), int(model_id))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_models(self, model, page=None):
"""Get all the models from the server. Args: model (string):
The class as a string. page (string, optional):
The page number as a string Returns: list: A list of instances of the requested model. """ |
if page is not None:
return self._store.find_all(self._get_model_class(model), params={'page': int(page)})
else:
return self._store.find_all(self._get_model_class(model)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def makeFigFromFile(filename,*args,**kwargs):
""" Renders an image in a matplotlib figure, so it can be added to reports args and kwargs are passed to plt.subplots """ |
import matplotlib.pyplot as plt
img = plt.imread(filename)
fig,ax = plt.subplots(*args,**kwargs)
ax.axis('off')
ax.imshow(img)
return fig |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_exes(self):
"""List the installed executables by this project.""" |
return [path.join(self.env_bin, f)
for f
in os.listdir(self.env_bin)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_env(self):
"""Create a virtual environment.""" |
    # Build the virtualenv, then our private scripts directory inside it.
    virtualenv(self.env, _err=sys.stderr)
    os.mkdir(self.env_bin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install_program(self, extra_args):
"""Install the app to the virtualenv""" |
pip = Command(path.join(self.env, 'bin', 'pip'))
args = ['install', self.raw_name,
'--install-option', '--install-scripts={}'
.format(self.env_bin)] + list(extra_args)
print_pretty("<BOLD>pip {}<END>\n".format(' '.join(args)))
pip(args, _out=sys.stdout, _err=sys.stderr)
print('') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_links(self):
"""Create links to installed scripts in the virtualenv's bin directory to our bin directory. """ |
for link in self.list_exes():
print_pretty("<FG_BLUE>Creating link for {}...<END>".format(link))
os.symlink(link, path.join(ENV_BIN, path.basename(link))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_links(self):
"""Remove links from our bin.""" |
for link in self.list_exes():
link = path.join(ENV_BIN, path.basename(link))
print_pretty("<FG_BLUE>Removing link {}...<END>".format(link))
os.remove(link) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uninstall(self):
"""Uninstall the environment and links.""" |
if path.isdir(self.env_bin):
self.remove_links()
if path.isdir(self.env):
print_pretty("<FG_BLUE>Removing env {}...<END>".format(self.env))
shutil.rmtree(self.env) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install(self, pip_args=None):
    """Create the environment if needed, install the program into it,
    and put executable links in place.

    Note: installation and linking run even when the env already
    exists, so a partial earlier install is completed.
    """
    env_present = path.isdir(self.env)
    if env_present:
        print_pretty("<FG_RED>This seems to already be installed.<END>")
    else:
        print_pretty("<FG_BLUE>Creating environment {}...<END>\n".format(self.env))
        self.create_env()
    self.install_program(pip_args)
    self.create_links()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_git(config_info):
    """Initialize a Git SCM tool object from component configuration.

    Raises when no git uri is configured for the component.
    """
    collected_args = {}

    def _store(value, key):
        # Translate a raw config value through the per-key converter.
        arg_key, arg_value = _GIT_ARG_FNS[key](value)
        collected_args[arg_key] = arg_value

    devpipeline_core.toolsupport.args_builder("git", config_info, _GIT_ARGS, _store)
    # Guard clause: a checkout without a uri cannot work.
    if not collected_args.get("uri"):
        raise Exception("No git uri ({})".format(config_info.config.name))
    return devpipeline_scm.make_simple_scm(Git(collected_args), config_info)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checkout(self, repo_dir, shared_dir, **kwargs):
    """Build the command list that checks out code from a Git server.

    Each registered builder contributes zero or more command dicts.
    """
    del kwargs  # unused; accepted for interface compatibility
    return [
        command
        for builder in _CHECKOUT_ARG_BUILDERS
        for command in builder(shared_dir, repo_dir, self._args)
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, repo_dir, **kwargs):
    """Update an existing checkout of source code.

    Checks out the configured revision and appends the fast-forward
    commands; returns None when no revision is configured.
    """
    del kwargs  # unused; accepted for interface compatibility
    revision = self._args.get("revision")
    if not revision:
        return None
    checkout_step = [{"args": ["git", "checkout", revision], "cwd": repo_dir}]
    return checkout_step + _ff_command(revision, repo_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_absorption(line, lines):
"""Parse Energy, Re sigma xx, Re sigma zz, absorp xx, absorp zz""" |
split_line = line.split()
energy = float(split_line[0])
re_sigma_xx = float(split_line[1])
re_sigma_zz = float(split_line[2])
absorp_xx = float(split_line[3])
absorp_zz = float(split_line[4])
return {"energy": energy, "re_sigma_xx": re_sigma_xx, "re_sigma_zz": re_sigma_zz,
"absorp_xx": absorp_xx, "absorp_zz": absorp_zz} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def args(parsed_args, name=None):
    """Interpret parsed args into a list of input streams.

    Existing files named in the args are opened; the clipboard and/or
    stdin are appended per the `paste`/`stdin` flags, with stdin as the
    fallback when nothing else is available.
    """
    candidates = parsed_args.arg_strings(name)
    streams = [open(c) for c in candidates if os.path.isfile(c)]
    # With no file streams, `paste` defaults to True.
    if getattr(parsed_args, 'paste', not streams):
        streams.append(clipboard_stream())
    if getattr(parsed_args, 'stdin', False):
        streams.append(sys.stdin)
    elif not streams:
        streams = [sys.stdin]
    return streams
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(argv=None):
    """Execute each module in the same interpreter.

    :param argv: list of module specs (each a string or pre-split
        sequence whose first item is a module name or path); defaults
        to the process's command-line arguments.
    """
    cli_args = sys.argv[1:] if argv is None else argv
    parsed = _get_parser().parse_args(cli_args)
    mand(parsed.module_seq)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_multiple_modules(module_gen):
    """Run each module spec from the iterator in this interpreter.

    The first item of each spec is a module name (run via run_module)
    or a filesystem path (run via run_path); the rest become argv.
    """
    for module_args in module_gen:
        target = module_args[0]
        with replace_sys_args(module_args):
            # Package-looking names are executed as modules; anything
            # else is treated as a path on disk.
            runner = (runpy.run_module
                      if re.match(VALID_PACKAGE_RE, target)
                      else runpy.run_path)
            runner(target, run_name='__main__')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
from contextlib import contextmanager


# NOTE(review): call sites use `with replace_sys_args(...)` (see
# call_multiple_modules); a bare generator cannot serve as a context
# manager, so the @contextmanager decorator is required -- presumably it
# was present in the original module and lost in extraction.
@contextmanager
def replace_sys_args(new_args):
    """Temporarily replace sys.argv with the given arguments.

    Restores sys.argv upon exit of the context manager, even when the
    managed block raises.
    """
    old_args = sys.argv
    sys.argv = new_args
    try:
        yield
    finally:
        sys.argv = old_args
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_bim(self, map3=False):
    """Basic marker details loading (chr, rsid, gen. dist, pos, alleles).

    Populates self.markers (chr/pos matrix), self.rsids, self.alleles
    and self.locus_count from the .bim file.

    :param map3: When true, the file has no genetic distance column
        (5-column MAP3 layout).
    :return: None
    """
    logging.info("Loading file: %s", self.bim_file)
    # Count lines up front so the marker matrix can be preallocated.
    line_total = sys_call('wc -l %s' % (self.bim_file))[0][0].split()[0]
    marker_count = int(line_total)
    self.markers = numpy.zeros((marker_count, 2), dtype=int)
    self.rsids = []
    self.alleles = []
    with open(self.bim_file) as bim_fh:
        for index, line in enumerate(bim_fh):
            if map3:
                # 5-column format: no genetic-distance column present.
                chrom, rsid, pos, al1, al2 = line.strip().split()
            else:
                # 6-column format: genetic distance is read but unused.
                chrom, rsid, _gd, pos, al1, al2 = line.strip().split()
            self.markers[index, 0] = int(chrom)
            self.markers[index, 1] = int(pos)
            self.alleles.append([al1, al2])
            self.rsids.append(rsid)
    self.locus_count = self.markers.shape[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_genotype_file(self):
    """Reset the .bed file handle so it points at the first marker.

    Rewinds the file and consumes the three-byte header, leaving the
    file pointer at the first genotype record.

    :return: (magic, data_format) tuple parsed from the header, where
        data_format 1 means SNP-major storage.
    """
    self.genotype_file.seek(0)
    buff = self.genotype_file.read(3)
    # Header layout: little-endian 2-byte magic number followed by one
    # storage-mode byte -- the original `buff.unpack("HB", version)`
    # was broken (bytes objects have no .unpack); use struct.unpack
    # with the same "<HB" format as filter_missing().
    magic, data_format = struct.unpack("<HB", buff)
    return magic, data_format
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_genotypes(self, bytes):
    """Decode packed 2-bit genotypes from .bed file bytes.

    :param bytes: byte values pulled from the .bed file
    :return: list of decoded genotypes, truncated to ind_count entries
        (any padding pairs in the final byte are discarded)
    """
    decoded = []
    for packed in bytes:
        # Each byte stores four genotypes, lowest-order pair first.
        decoded.extend(
            self.geno_conversions[(packed >> shift) & 3]
            for shift in (0, 2, 4, 6)
        )
    return decoded[:self.ind_count]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be \ considered :return: None This must be run prior to actually parsing the genotypes because it initializes the following instance members: * ind_mask * total_locus_count * locus_count * data_parser.boundary (adds loci with too much missingness) """ |
# Pass 1 tallies: per-individual missing-genotype counts and the number
# of loci that survive the boundary filter.
missing = None
locus_count = 0
logging.info("Sorting out missing data from genotype data")
# Filter out individuals according to missingness
self.genotype_file.seek(0)
# .bed header: little-endian 2-byte magic + one storage-mode byte.
magic, data_format = struct.unpack("<HB", self.genotype_file.read(3))
if data_format != 1:
    # Only SNP-major layout (mode 1) is supported.
    Exit(("MVTEST is currently unable to read data formatted as " +
        "individual major. You must regenerate your data in SNP major"+
        " format. "))
# Each byte packs 4 two-bit genotypes, so a locus occupies
# ceil(ind_count / 4) bytes.
# NOTE(review): "/" is integer division under Python 2; use // if this
# is ever ported to Python 3 (a float here would break the "B"* format).
self.bytes_per_read = self.ind_count / 4
if self.ind_count % 4 > 0:
    self.bytes_per_read += 1
self.fmt_string = "<" + "B"*self.bytes_per_read
last_chr = -1
# Pass 1: accumulate per-individual missingness over in-boundary loci.
for index in range(self.locus_count):
    buffer = struct.unpack(self.fmt_string,
            self.genotype_file.read(self.bytes_per_read))
    chr, pos = self.markers[index]
    rsid = self.rsids[index]
    if DataParser.boundary.TestBoundary(chr, pos, rsid):
        if last_chr != chr:
            sys.stdout.flush()
            last_chr = chr
        genotypes = numpy.array(self.extract_genotypes(buffer),
                                dtype=numpy.int8)
        locus_count += 1
        if missing is None:
            # Lazily sized from the first decoded locus.
            missing = numpy.zeros(genotypes.shape[0], dtype='int8')
        missing += 0+(genotypes==DataParser.missing_storage)
# Drop individuals whose missing count exceeds the tolerance fraction.
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask = self.ind_mask|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
# SNP tolerance is relative to the individuals that remain.
max_missing = DataParser.snp_miss_tol * valid_individuals
# We can't merge these two iterations since we need to know which
# individuals to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
self.genotype_file.read(3)
self.total_locus_count = self.locus_count
self.locus_count = 0
last_chr = -1
# Pass 2: with dropped individuals masked out, drop loci whose
# remaining missingness exceeds the SNP tolerance.
for index in range(self.total_locus_count):
    buffer = struct.unpack(self.fmt_string,
            self.genotype_file.read(self.bytes_per_read))
    genotypes = numpy.ma.MaskedArray(self.extract_genotypes(buffer),
                                     self.ind_mask).compressed()
    chr, pos = self.markers[index]
    rsid = self.rsids[index]
    if DataParser.boundary.TestBoundary(chr, pos, rsid):
        if last_chr != chr:
            sys.stdout.flush()
            last_chr = chr
        missing = numpy.sum(0+(genotypes==DataParser.missing_storage))
        if missing > max_missing:
            # Record the drop both in the shared boundary object and
            # in the local list of rsids.
            DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
            dropped_snps.append(rsid)
        else:
            self.locus_count += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def full_path(path):
    """Get the real path, expanding env vars, `~`, and symlinks."""
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    return os.path.realpath(expanded)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, generator):
    """Parse an iterable source of strings into a generator of blocks.

    For each line, the first matching rule's handler produces the
    block; lines matching no rule yield an empty dict. Handlers may
    consume further lines from the shared iterator.
    """
    stream = iter(generator)
    for line in stream:
        # First rule whose predicate accepts this line, if any.
        matched = next((rule for rule in self.rules if rule[0](line)), None)
        yield matched[1](line, stream) if matched is not None else {}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ReportConfiguration(self, file):
"""Report the configuration details for logging purposes. :param file: Destination for report details :return: None """ |
# Python 2 print-chevron syntax throughout; the `file` parameter
# shadows the builtin of the same name.
global encodingpar
# NOTE(review): the declared global is `encodingpar`, but the final
# line below indexes with `encoding` -- one of the two names looks
# wrong; confirm against the module globals before relying on this.
print >> file, libgwas.BuildReportLine("MACH_ARCHIVES", "")
if self.chrpos_encoding:
    # chr:pos style IDs may defeat RSID-based SNP boundary filtering.
    print >> file, libgwas.BuildReportLine("MACH_CHRPOS",
            ("IDS expected to be in format chr:pos" +
            " SNP boundary filters might not work " +
            "(see manual for details)"))
else:
    print >> file, libgwas.BuildReportLine("MACH_CHRPOS",
            "IDs are treated like RSIDs")
# List each archive alongside its matching info file (paired by index;
# the loop variable `arch` itself is unused).
idx = 0
for arch in self.archives[0:]:
    print >> file, libgwas.BuildReportLine("", "%s:%s" % (self.archives[idx], self.info_files[idx]))
    idx += 1
print >> file, libgwas.BuildReportLine("ENCODING", ["Dosage", "Genotype"][encoding])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.