text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_homepage(config, env):
    """Render the homepage.jinja template.

    :param config: site configuration passed through to the template context.
    :param env: jinja2 environment used to look up ``homepage.jinja``.
    :return: the rendered page as a string.
    """
    homepage_template = env.get_template('homepage.jinja')
    return homepage_template.render(config=config)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
"""Get calendar date probabilities Parameters calibcurve : CalibCurve Calibration curve. rcmean : scalar Reservoir-adjusted age. w2 : scalar r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011. cutoff : scalar, optional Unknown. normal_distr : Bool, optional Use normal distribution for date errors. If False, then use Student's t-distribution. t_a : scalar, optional Student's t-distribution parameter, a. t_b - 1 must equal t_b. t_b : scalar, optional Student's t-distribution parameter, b. t_b - 1 must equal t_b. #Line 943 of Bacon.R #cc : calib_curve (3-col format) #rcmean : det['age'][i] - d_R #w2 : dat['error'][i]^2 + d_STD**2 """ |
assert t_b - 1 == t_a
if normal_distr:
# TODO(brews): Test this. Line 946 of Bacon.R.
std = np.sqrt(calibcurve.error ** 2 + w2)
dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age)
else:
# TODO(brews): Test this. Line 947 of Bacon.R.
dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))
cal = np.array([calibcurve.calbp.copy(), dens]).T
cal[:, 1] = cal[:, 1] / cal[:, 1].sum()
# "ensure that also very precise dates get a range of probabilities"
cutoff_mask = cal[:, 1] > cutoff
if cutoff_mask.sum() > 5:
out = cal[cutoff_mask, :]
else:
calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)
caly = np.interp(calx, cal[:, 0], cal[:, 1])
out = np.array([calx, caly / caly.sum()]).T
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]):
    """Get density of calendar dates for chron date segment in core.

    Parameters
    ----------
    chron : DatedProxy-like
        Must expose ``depth``, ``age`` and ``error`` arrays.
    calib_curve : CalibCurve or list of CalibCurves
    d_r : scalar or ndarray
        Carbon reservoir offset.
    d_std : scalar or ndarray
        Carbon reservoir offset error standard deviation.
    cutoff : scalar, optional
        Minimum density retained by d_cal.
    normal_distr : bool, optional
        Use normal distribution for date errors. If False, then use
        Student's t-distribution.
    t_a : scalar or ndarray, optional
        Student's t-distribution parameter, a. Element-wise, must equal t_b - 1.
    t_b : scalar or ndarray, optional
        Student's t-distribution parameter, b. Element-wise, must equal t_a + 1.

    Returns
    -------
    depth : ndarray
        Depth of dated sediment sample.
    probs : list of 2d arrays
        Density of calendar age for each dated sediment sample. For each
        sediment sample, the 2d array has two columns, the first is the
        calendar age. The second column is the density for that calendar age.
    """
    # Python version of .bacon.calib() on line 908 in Bacon.R.
    n = len(chron.depth)
    calib_curve = np.array(calib_curve)
    t_a = np.array(t_a)
    t_b = np.array(t_b)
    # BUG FIX: the comparison is element-wise; a bare `assert t_b - 1 == t_a`
    # raises "The truth value of an array ... is ambiguous" as soon as more
    # than one (t_a, t_b) pair is passed.  np.all() checks every pair.
    assert np.all(t_b - 1 == t_a)
    d_r = np.array(d_r)
    d_std = np.array(d_std)
    # Broadcast length-1 inputs so there is one value per dated depth.
    if len(t_a) == 1:
        t_a = np.repeat(t_a, n)
    if len(t_b) == 1:
        t_b = np.repeat(t_b, n)
    if len(d_r) == 1:
        d_r = np.repeat(d_r, n)
    if len(d_std) == 1:
        d_std = np.repeat(d_std, n)
    if len(calib_curve) == 1:
        calib_curve = np.repeat(calib_curve, n)
    # Reservoir-adjusted ages and combined squared errors (line 959+ of Bacon.R).
    rcmean = chron.age - d_r
    w2 = chron.error ** 2 + d_std ** 2
    calib_probs = []
    for i in range(n):
        age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i],
                                 t_a=t_a[i], t_b=t_b[i],
                                 cutoff=cutoff, normal_distr=normal_distr)
        calib_probs.append(age_realizations)
    return np.array(chron.depth), calib_probs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_browser(self):
    """Override in appropriate way to prepare a logged in browser.

    Starts a PhantomJS browser, visits the login page and submits the
    login form.  Two form layouts are supported: the standard one
    (``nickname``/``password``) and a legacy one (``p_un``/``p_pw``).
    """
    self.browser = splinter.Browser('phantomjs')
    self.browser.visit(self.server_url + "/youraccount/login")
    try:
        self.browser.fill('nickname', self.user)
        self.browser.fill('password', self.password)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only fall back to the legacy field names on real errors.
        self.browser.fill('p_un', self.user)
        self.browser.fill('p_pw', self.password)
    self.browser.fill('login_method', self.login_method)
    self.browser.find_by_css('input[type=submit]').click()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_marcxml(self, marcxml, mode):
    """Upload a record to the server.

    :param marcxml: the XML to upload.
    :param mode: the mode to use for the upload.
        - "-i" insert new records
        - "-r" replace existing records
        - "-c" correct fields of records
        - "-a" append fields to records
        - "-ir" insert record or replace if it exists
    """
    allowed_modes = ("-i", "-r", "-c", "-a", "-ir")
    if mode not in allowed_modes:
        raise NameError("Incorrect mode " + str(mode))
    upload_url = self.server_url + "/batchuploader/robotupload"
    return requests.post(upload_url,
                         data={'file': marcxml, 'mode': mode},
                         headers={'User-Agent': CFG_USER_AGENT})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url(self):
    """Return the URL to this record, or None if not known."""
    # Both the server and the record id must be known to build a URL.
    if self.server_url is None or self.recid is None:
        return None
    return '/'.join([self.server_url, CFG_SITE_RECORD, str(self.recid)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_list_of_twitter_list(list_of_twitter_lists, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """Extract the sets of keywords for each Twitter list.

    Inputs:  - list_of_twitter_lists: A python list of Twitter lists in json format.
             - remaining arguments: tokenizer/tagger/lemmatizer machinery passed
               straight through to clean_twitter_list.
    Outputs: - list_of_keyword_sets: A list of sets of keywords (i.e. not a
               bag-of-words) in python set format.
             - list_of_lemma_to_keywordbags: List of python dicts that map
               stems/lemmas to original topic keywords.
    """
    list_of_keyword_sets = []
    list_of_lemma_to_keywordbags = []
    if list_of_twitter_lists is not None:
        for twitter_list in list_of_twitter_lists:
            # Skip missing entries; only real lists are cleaned.
            if twitter_list is None:
                continue
            keyword_set, lemma_to_keywordbag = clean_twitter_list(
                twitter_list, sent_tokenize, _treebank_word_tokenize, tagger,
                lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re,
                digits_punctuation_whitespace_re, pos_set)
            list_of_keyword_sets.append(keyword_set)
            list_of_lemma_to_keywordbags.append(lemma_to_keywordbag)
    return list_of_keyword_sets, list_of_lemma_to_keywordbags
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.

    Inputs:  - twitter_list_corpus: A python list of Twitter lists in json format.
             - remaining arguments: tokenization/tagging/lemmatization machinery
               passed through to clean_list_of_twitter_list.
    Output:  - bag_of_words: A bag-of-words in python dictionary format.
             - lemma_to_keywordbag_total: Aggregated python dictionary that maps
               stems/lemmas to original topic keywords.
    """
    # Extract a bag-of-words from a list of Twitter lists.
    # May result in empty sets.
    list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus,
                                                                                   sent_tokenize, _treebank_word_tokenize,
                                                                                   tagger, lemmatizer, lemmatize, stopset,
                                                                                   first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
                                                                                   pos_set)
    # Reduce keyword sets into one bag-of-words.
    bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets)
    # Reduce lemma-to-keywordbag maps: sum keyword multiplicities per lemma
    # across all lists.
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_to_keywordbag in list_of_lemma_to_keywordbags:
        for lemma, keywordbag in lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity
    return bag_of_words, lemma_to_keywordbag_total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grouper(iterable, n, pad_value=None):
    """Yield n-length chunks of an input iterable, padding the last chunk.

    Example: grouper('abcdefg', 3, 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')

    Inputs:  - iterable: The source iterable that needs to be chunkified.
             - n: The size of the chunks.
             - pad_value: The value with which the last chunk will be padded.
    Output:  - chunk_gen: A generator of n-length chunks of the input iterable.
    """
    # zip_longest over n references to one shared iterator slices the
    # input into consecutive n-tuples, padding the tail with pad_value.
    args = [iter(iterable)] * n
    return (chunk for chunk in zip_longest(*args, fillvalue=pad_value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chunks(iterable, n):
    """Yield successive n-length slices of *iterable*.

    Input:
    - iterable: a sliceable sequence (list, string, ...).
    - n: the chunk size (the last chunk may be shorter).

    Yields:
    - sub_list: slices of length up to n.
    """
    # A plain range() suffices here; np.arange built a throwaway array
    # and yielded numpy integer indices for no benefit.
    for i in range(0, len(iterable), n):
        yield iterable[i:i + n]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_every(iterable, n):
    """A generator of n-length list chunks of an input iterable."""
    # TODO: Remove this, or make it return a generator.
    source = iter(iterable)
    while True:
        chunk = list(islice(source, n))
        if not chunk:
            return
        yield chunk
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_properties(item_properties, prop_name, merge_value):
    """Merge *merge_value* into *item_properties* under *prop_name*.

    Figures out which type of property value should be merged and invokes
    the right strategy: ints are added, strings concatenated, lists merged
    via merge_list.  Returns the updated properties dict, or False when
    the value type is unsupported.
    """
    existing_value = item_properties.get(prop_name, None)
    if not existing_value:
        # No (truthy) existing value for the property: just set it.
        item_properties[prop_name] = merge_value
        return item_properties
    value_type = type(merge_value)
    if value_type is int or value_type is str:
        item_properties[prop_name] = existing_value + merge_value
    elif value_type is list:
        item_properties[prop_name] = merge_list(existing_value, merge_value)
    else:
        return False
    return item_properties
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_id(self, element):
    """Generate an id for an element, if it does not already have one.

    :param element: The element.
    :type element: hatemile.util.html.HTMLDOMElement
    """
    if element.has_attribute('id'):
        return
    element.set_attribute('id', self.prefix_id + str(self.count))
    self.count += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_state_data(self, states):
    """Fetch census estimates from table.

    For every census table variable, pulls state-, county- and
    district-level estimates for each of the given states.
    """
    print("Fetching census data")
    for table in CensusTable.objects.all():
        api = self.get_series(table.series)
        for variable in table.variables.all():
            estimate = "{}_{}".format(table.code, variable.code)
            print(
                ">> Fetching {} {} {}".format(
                    table.year, table.series, estimate
                )
            )
            for state in tqdm(states):
                # The three granularities share the same keyword set.
                call_kwargs = dict(
                    api=api,
                    table=table,
                    variable=variable,
                    estimate=estimate,
                    state=state,
                )
                self.get_state_estimates_by_state(**call_kwargs)
                self.get_county_estimates_by_state(**call_kwargs)
                self.get_district_estimates_by_state(**call_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has(self, name):
    """Return True if there is at least one annotation with the given
    name, otherwise False.
    """
    # any() with a generator short-circuits exactly like the manual loop.
    return any(a.name == name for a in self.all_annotations)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_first(self, name):
    """Get the first annotation with the given name, or None."""
    matches = (a for a in self.all_annotations if a.name == name)
    return next(matches, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all(self, name):
    """Get all the annotations with the given name."""
    matching = []
    for annot in self.all_annotations:
        if annot.name == name:
            matching.append(annot)
    return matching
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def first_value_of(self, name, default_value=None):
    """Return the first value of the param *name* if present, otherwise
    *default_value*.

    values_of() may return a scalar, a list, or None; a scalar is
    returned as-is, a list contributes its first element.
    """
    vals = self.values_of(name)
    if vals is None:
        return default_value
    if type(vals) is list:
        # BUG FIX: an empty list previously raised IndexError on vals[0];
        # fall back to the default instead.
        return vals[0] if vals else default_value
    return vals
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_long_description():
    """Return the long description of HaTeMiLe for Python.

    :return: The contents of README.md.
    :rtype: str
    """
    readme_path = os.path.join(BASE_DIRECTORY, 'README.md')
    with open(readme_path, 'r', encoding='utf-8') as readme_file:
        return readme_file.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_packages():
    """Return the packages used for HaTeMiLe for Python.

    :return: The packages used for HaTeMiLe for Python.
    :rtype: list(str)
    """
    packages = find_packages(exclude=['tests'])
    # The root, the javascript assets and every locale subpackage ship too.
    packages += ['', 'js', LOCALES_DIRECTORY]
    packages += [
        LOCALES_DIRECTORY + '.' + directory
        for directory in os.listdir(LOCALES_DIRECTORY)
    ]
    return packages
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_package_data():
    """Return the packages with static files of HaTeMiLe for Python.

    :return: Mapping of package name to its static-file glob patterns.
    :rtype: dict(str, list(str))
    """
    locale_data = {
        LOCALES_DIRECTORY + '.' + directory: ['*.json']
        for directory in os.listdir(LOCALES_DIRECTORY)
    }
    package_data = {
        '': ['*.xml'],
        'js': ['*.js'],
        LOCALES_DIRECTORY: ['*']
    }
    package_data.update(locale_data)
    return package_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_requirements():
    """Return the content of 'requirements.txt' in a list.

    :return: The stripped lines of 'requirements.txt'.
    :rtype: list(str)
    """
    requirements_path = os.path.join(BASE_DIRECTORY, 'requirements.txt')
    with open(requirements_path, 'r', encoding='utf-8') as requirements_file:
        return [line.strip() for line in requirements_file]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def where_session_id(cls, session_id):
    """Easy way to query by session id.

    Returns the single matching session, or None when zero or
    several rows match.
    """
    try:
        return cls.query.filter_by(session_id=session_id).one()
    except (NoResultFound, MultipleResultsFound):
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(cls, user_id):
    """Count sessions with the given user_id."""
    query = cls.query.with_entities(cls.user_id)
    return query.filter_by(user_id=user_id).count()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_branch():
    """Return the current code branch, with '/' replaced by '_'."""
    # GIT_BRANCH is set by Travis, BRANCH_NAME by Jenkins 2.
    branch = os.getenv('GIT_BRANCH') or os.getenv('BRANCH_NAME')
    if not branch:
        # Neither CI variable is set: ask git directly.
        branch = check_output(
            "git rev-parse --abbrev-ref HEAD".split(" ")
        ).decode('utf-8').strip()
    return branch.replace("/", "_")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_version():
    """Return the current code version.

    Prefers the output of ``git describe --tags``; falls back to the
    short commit hash when no tag is reachable.
    """
    def _git(cmd):
        # Run a git command and return its stripped stdout.
        return check_output(cmd.split(" ")).decode('utf-8').strip()

    try:
        return _git("git describe --tags")
    except CalledProcessError:
        return _git("git rev-parse --short HEAD")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jenkins_last_build_sha():
    """Return the sha of the last completed jenkins build for this project.

    Expects JOB_URL in environment.
    """
    job_url = os.getenv('JOB_URL')
    job_json_url = "{0}/api/json".format(job_url)
    # NOTE(review): urllib.urlopen is the Python 2 API; Python 3 needs
    # urllib.request.urlopen — confirm the target interpreter.
    response = urllib.urlopen(job_json_url)
    job_data = json.loads(response.read())
    # Follow the job metadata to the last completed build's own JSON.
    last_completed_build_url = job_data['lastCompletedBuild']['url']
    last_complete_build_json_url = "{0}/api/json".format(last_completed_build_url)
    response = urllib.urlopen(last_complete_build_json_url)
    last_completed_build = json.loads(response.read())
    # NOTE(review): indexing a JSON object with the integer 1 raises
    # KeyError; this likely should be last_completed_build['actions'][1]
    # (Jenkins reports lastBuiltRevision inside the 'actions' list) —
    # verify against a live API response before changing.
    return last_completed_build[1]['lastBuiltRevision']['SHA1']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_changed_files_from(old_commit_sha, new_commit_sha):
    """Return the files changed between two commits.

    NOTE: despite the original wording ("a list"), this returns a single
    newline-separated string of file paths, exactly as printed by
    ``git diff-tree --name-only``; callers must split it themselves.
    """
    return check_output(
        "git diff-tree --no-commit-id --name-only -r {0}..{1}".format(
            old_commit_sha,
            new_commit_sha
        ).split(" ")
    ).decode('utf-8').strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_snow_tweets_from_file_generator(json_file_path):
    """Yield all tweets contained in a file of line-delimited JSON.

    Input:  - json_file_path: The path of a json file containing a tweet
              in each line.
    Yields: - tweet: A tweet in python dictionary (json) format.
    """
    with open(json_file_path, "r", encoding="utf-8") as fp:
        for file_line in fp:
            yield json.loads(file_line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_all_snow_tweets_from_disk_generator(json_folder_path):
    """Yield every SNOW tweet stored on disk under a folder.

    Input:  - json_folder_path: The path of the folder containing the raw data.
    Yields: - tweet: A tweet in python dictionary (json) format.
    """
    # Stream the tweets of every file in the folder, one file at a time.
    for name in os.listdir(json_folder_path):
        file_path = json_folder_path + "/" + name
        yield from extract_snow_tweets_from_file_generator(file_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_snow_tweets_from_disk_to_mongodb(snow_tweets_folder):
    """Store all SNOW tweets in a mongodb collection."""
    mongo_client = pymongo.MongoClient("localhost", 27017)
    collection = mongo_client["snow_tweet_storage"]["tweets"]
    for tweet in extract_all_snow_tweets_from_disk_generator(snow_tweets_folder):
        collection.insert(tweet)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_file(f, full_path):
    """Save file *f* to *full_path* and set permission bits.

    Creates any missing directories, streams the content in buffered
    chunks, then applies the configured chmod to the new file.
    """
    import shutil  # local import: the file's import header is not in view

    make_dirs_for_file_path(full_path, mode=dju_settings.DJU_IMG_CHMOD_DIR)
    with open(full_path, 'wb') as target:
        f.seek(0)
        # shutil.copyfileobj replaces the hand-rolled read/write loop.
        shutil.copyfileobj(f, target, length=dju_settings.DJU_IMG_RW_FILE_BUFFER_SIZE)
    os.chmod(full_path, dju_settings.DJU_IMG_CHMOD_FILE)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_profile_configs(profile=None, use_cache=True):
    """Return upload configs for *profile*.

    The result merges DJU_IMG_UPLOAD_PROFILE_DEFAULT with the profile's
    own settings, and each variant with the variant defaults.  Raises
    ValueError for an unknown profile other than 'default'.
    """
    # Serve from the module-level cache when allowed.
    if use_cache and profile in _profile_configs_cache:
        return _profile_configs_cache[profile]
    profile_conf = None
    if profile is not None:
        try:
            profile_conf = dju_settings.DJU_IMG_UPLOAD_PROFILES[profile]
        except KeyError:
            # 'default' silently falls back to the default profile config.
            if profile != 'default':
                raise ValueError(unicode(ERROR_MESSAGES['unknown_profile']) % {'profile': profile})
    # Deep-copy so later mutation never leaks back into the settings module.
    conf = copy.deepcopy(dju_settings.DJU_IMG_UPLOAD_PROFILE_DEFAULT)
    if profile_conf:
        conf.update(copy.deepcopy(profile_conf))
    # Overlay every variant config onto the variant defaults.
    # (Note: xrange/unicode — this module targets Python 2.)
    for v_i in xrange(len(conf['VARIANTS'])):
        v = conf['VARIANTS'][v_i]
        conf['VARIANTS'][v_i] = copy.deepcopy(dju_settings.DJU_IMG_UPLOAD_PROFILE_VARIANT_DEFAULT)
        conf['VARIANTS'][v_i].update(v)
    if use_cache:
        _profile_configs_cache[profile] = conf
    return conf
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_img_id(profile, ext=None, label=None, tmp=False):
    """Generate an img_id of the form '<profile>:[tmp]<dtstr>_<rand>[_label][.ext]'."""
    if ext and not ext.startswith('.'):
        ext = '.' + ext
    if label:
        # Keep only url-safe characters, collapse repeated '_', cap the length.
        label = re.sub(r'[^a-z0-9_\-]', '', label, flags=re.I)
        label = re.sub(r'_+', '_', label)[:60]
    tmp_prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX if tmp else ''
    random_part = get_random_string(4, 'abcdefghijklmnopqrstuvwxyz0123456789')
    label_part = ('_' + label) if label else ''
    return '{profile}:{tmp}{dtstr}_{rand}{label}{ext}'.format(
        profile=profile,
        tmp=tmp_prefix,
        dtstr=datetime_to_dtstr(),
        rand=random_part,
        label=label_part,
        ext=ext or '',
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_relative_path_from_img_id(img_id, variant_label=None, ext=None, create_dirs=False):
    """Return the path of the img_id's file, relative to MEDIA_URL.

    :param img_id: '<profile>:<base_name>' identifier.
    :param variant_label: when given, build the path of that variant
        instead of the main file (suffix and extension may differ).
    :param ext: explicit extension override.
    :param create_dirs: also create the target directories on disk.
    """
    profile, base_name = img_id.split(':', 1)
    conf = get_profile_configs(profile)
    # Main file and variants carry different filename suffixes.
    if not variant_label:
        status_suffix = dju_settings.DJU_IMG_UPLOAD_MAIN_SUFFIX
    else:
        status_suffix = dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX
    name, file_ext = os.path.splitext(base_name)
    prefix = ''
    # A temporary upload keeps its tmp prefix on the final filename,
    # but the prefix is excluded from the hashed part of the name.
    if name.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
        name = name[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]
        prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX
    name_parts = name.split('_', 2)
    # The hash covers the first two underscore-separated parts of the name
    # (see generate_img_id: '<dtstr>_<rand>[_label]').
    name = '{name}{status_suffix}{hash}'.format(
        name=name,
        status_suffix=status_suffix,
        hash=get_hash('_'.join(name_parts[:2]), variant_label=variant_label)
    )
    if variant_label:
        name += '_' + variant_label
    # Extension priority: explicit ext > variant FORMAT > original extension.
    if ext:
        file_ext = ext
    elif variant_label:
        for var_conf in conf['VARIANTS']:
            var_conf_label = var_conf['LABEL'] or get_variant_label(var_conf)
            if var_conf_label == variant_label:
                if var_conf['FORMAT']:
                    file_ext = var_conf['FORMAT'].lower()
                break
    if file_ext and not file_ext.startswith('.'):
        file_ext = '.' + file_ext
    # Shard files into subdirectories by the last two chars of the first
    # name part; normalize to forward slashes for URL use.
    relative_path = os.path.join(
        dju_settings.DJU_IMG_UPLOAD_SUBDIR,
        conf['PATH'],
        name_parts[0][-2:],
        (prefix + name + file_ext)
    ).replace('\\', '/')
    if create_dirs:
        path = media_path(relative_path)
        make_dirs_for_file_path(path, mode=dju_settings.DJU_IMG_CHMOD_DIR)
    return relative_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_img_id_exists(img_id):
    """Check that img_id has a real main file on the filesystem."""
    main_relative = get_relative_path_from_img_id(img_id)
    return os.path.isfile(media_path(main_relative))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_img_id_valid(img_id):
    """Check that *img_id* is syntactically valid and names a known profile.

    A valid id contains only ``[a-z0-9_:\\-.]`` (case-insensitive), no
    repeated dots, and exactly one ':' separating a non-empty profile from
    a non-empty base name; the profile must resolve via get_profile_configs().
    """
    # BUG FIX: re.sub's 4th positional argument is `count`, not `flags` —
    # the original passed re.IGNORECASE (== 2) as a substitution limit,
    # silently making the character check case-sensitive.
    t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
    t = re.sub(r'\.+', '.', t)
    if img_id != t or img_id.count(':') != 1:
        return False
    profile, base_name = img_id.split(':', 1)
    if not profile or not base_name:
        return False
    try:
        get_profile_configs(profile)
    except ValueError:
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_all_files_of_img_id(img_id):
    """Remove the main file and every variant file of *img_id*."""
    files = get_files_by_img_id(img_id, check_hash=False)
    if not files:
        return
    # Main file first, then all variants.
    paths = [files['main']]
    paths.extend(files['variants'].values())
    for rel_path in paths:
        os.remove(media_path(rel_path))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_tmp_prefix_from_filename(filename):
    """Strip the tmp prefix from *filename*; raise RuntimeError if absent."""
    tmp_prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX
    if not filename.startswith(tmp_prefix):
        raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
    return filename[len(tmp_prefix):]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_tmp_prefix_from_file_path(file_path):
    """Strip the tmp prefix from the filename part of a path or url."""
    dir_part, filename = os.path.split(file_path)
    cleaned = os.path.join(dir_part, remove_tmp_prefix_from_filename(filename))
    return cleaned.replace('\\', '/')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_permalink(img_id):
    """Remove the tmp prefix from an img_id's files.

    Renames the main file and every variant on disk, and returns the
    img_id without the tmp prefix.  Returns None when no files are found
    for img_id (i.e. get_files_by_img_id returned None).
    """
    profile, filename = img_id.split(':', 1)
    new_img_id = profile + ':' + remove_tmp_prefix_from_filename(filename)
    urls = get_files_by_img_id(img_id)
    if urls is None:
        return urls
    # Collect unique (from, to) rename pairs: main file plus all variants.
    move_list = {(urls['main'], remove_tmp_prefix_from_file_path(urls['main']))}
    # NOTE(review): dict.iteritems() is Python 2 only; use .items() if this
    # module is ever ported to Python 3.
    for var_label, var_file_path in urls['variants'].iteritems():
        move_list.add((var_file_path, remove_tmp_prefix_from_file_path(var_file_path)))
    for file_path_from, file_path_to in move_list:
        os.rename(media_path(file_path_from), media_path(file_path_to))
    return new_img_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_from_fs(fn, profile=None, label=None):
    """Save the image at path *fn* with the TMP prefix and return its img_id."""
    if not os.path.isfile(fn):
        raise ValueError('File is not exists: {}'.format(fn))
    if profile is None:
        profile = 'default'
    conf = get_profile_configs(profile)
    with open(fn, 'rb') as f:
        # Reject files whose detected type is not allowed by the profile.
        if not is_image(f, types=conf['TYPES']):
            allowed = ', '.join(t.upper() for t in conf['TYPES'])
            msg = (('Format of uploaded file "%(name)s" is not allowed. '
                    'Allowed formats is: %(formats)s.') %
                   {'name': fn, 'formats': allowed})
            raise RuntimeError(msg)
        return _custom_upload(f, profile, label, conf)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_from_fileobject(f, profile=None, label=None):
    """Save the image from file object *f* with the TMP prefix and return its img_id."""
    if profile is None:
        profile = 'default'
    conf = get_profile_configs(profile)
    f.seek(0)
    # Reject files whose detected type is not allowed by the profile.
    if not is_image(f, types=conf['TYPES']):
        allowed = ', '.join(t.upper() for t in conf['TYPES'])
        raise RuntimeError(
            ('Format of uploaded file is not allowed. '
             'Allowed formats is: %(formats)s.') % {'formats': allowed}
        )
    return _custom_upload(f, profile, label, conf)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, method, url, **kwargs):
    """Unified method to make request to the Github API.

    :param method: HTTP Method to use
    :param url: URL to reach
    :param kwargs: dictionary of arguments (params for URL parameters,
        data for post/put data)
    :return: Response
    """
    if "data" in kwargs:
        # Bodies are sent as JSON.
        kwargs["data"] = json.dumps(kwargs["data"])
    # Every request authenticates with the proxy's token.
    kwargs["headers"] = {
        'Content-Type': 'application/json',
        'Authorization': 'token %s' % self.__token__,
    }
    req = make_request(
        method,
        url,
        **kwargs
    )
    # Log request and response details for debugging.
    self.logger.debug(
        "Request::{}::{}".format(method, url),
        extra={
            "request": kwargs,
            "response": {"headers": req.headers, "code": req.status_code, "data": req.content}
        }
    )
    return req
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_branch(self, file):
    """Decide the name of the default branch given the file and the configuration.

    :param file: File with informations about it
    :return: Branch Name
    """
    configured = self.__default_branch__
    if isinstance(configured, str):
        # A fixed branch name was configured directly.
        return configured
    if configured == GithubProxy.DEFAULT_BRANCH.NO:
        return self.master_upstream
    # Otherwise derive a branch name from the file's sha.
    return file.sha[:8]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app):
    """Initialize the application and register the blueprint.

    :param app: Flask Application
    :return: Blueprint of the current nemo app
    :rtype: flask.Blueprint
    """
    self.app = app
    self.__blueprint__ = Blueprint(
        self.__name__,
        self.__name__,
        url_prefix=self.__prefix__,
    )
    # Wire every configured (url, handler name, methods) triple into the
    # blueprint; endpoints drop the "r_" handler-name prefix.
    for url, name, methods in self.__urls__:
        self.blueprint.add_url_rule(
            url,
            view_func=getattr(self, name),
            endpoint=name.replace("r_", ""),
            methods=methods
        )
    # BUG FIX: Flask's register_blueprint() returns None, so the original
    # `self.app = self.app.register_blueprint(...)` clobbered self.app.
    self.app.register_blueprint(self.blueprint)
    return self.blueprint
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, file):
""" Create a new file on github :param file: File to create :return: File or self.ProxyError """ |
# Payload for the GitHub "create file contents" endpoint.
input_ = {
    "message": file.logs,
    "author": file.author.dict(),
    "content": file.base64,
    "branch": file.branch
}
uri = "{api}/repos/{origin}/contents/{path}".format(
    api=self.github_api_url,
    origin=self.origin,
    path=file.path
)
data = self.request("PUT", uri, data=input_)
# 201 Created means the file now exists on the target branch.
if data.status_code == 201:
    file.pushed = True
    return file
else:
    # Surface the GitHub error message plus enough context to debug.
    decoded_data = json.loads(data.content.decode("utf-8"))
    return self.ProxyError(
        data.status_code, (decoded_data, "message"),
        step="put", context={
            "uri": uri,
            "params": input_
        }
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, file):
""" Check on github if a file exists :param file: File to check status of :return: File with new information, including blob, or Error :rtype: File or self.ProxyError """ |
uri = "{api}/repos/{origin}/contents/{path}".format(
    api=self.github_api_url,
    origin=self.origin,
    path=file.path
)
params = {
    "ref": file.branch
}
data = self.request("GET", uri, params=params)
# We update the file blob because it exists and we need it for update
if data.status_code == 200:
    data = json.loads(data.content.decode("utf-8"))
    file.blob = data["sha"]
elif data.status_code == 404:
    # 404 just means the file does not exist yet; file.blob stays unset.
    pass
else:
    # Any other status is a real API failure; report it with context.
    decoded_data = json.loads(data.content.decode("utf-8"))
    return self.ProxyError(
        data.status_code, (decoded_data, "message"),
        step="get", context={
            "uri": uri,
            "params": params
        }
    )
return file
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, file):
""" Make an update query on Github API for given file :param file: File to update, with its content :return: File with new information, including success (or Error) """ |
# Update payload; "sha" (the current blob) is required by GitHub to
# guard against lost updates.
params = {
    "message": file.logs,
    "author": file.author.dict(),
    "content": file.base64,
    "sha": file.blob,
    "branch": file.branch
}
uri = "{api}/repos/{origin}/contents/{path}".format(
    api=self.github_api_url,
    origin=self.origin,
    path=file.path
)
data = self.request("PUT", uri, data=params)
# Updates (unlike creations) answer 200 OK.
if data.status_code == 200:
    file.pushed = True
    return file
else:
    reply = json.loads(data.content.decode("utf-8"))
    return self.ProxyError(
        data.status_code, (reply, "message"),
        step="update", context={
            "uri": uri,
            "params": params
        }
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pull_request(self, file):
""" Create a pull request :param file: File to push through pull request :return: URL of the PullRequest or Proxy Error """ |
uri = "{api}/repos/{upstream}/pulls".format(
    api=self.github_api_url,
    upstream=self.upstream,
    path=file.path  # NOTE(review): unused by this format string; harmless
)
# PR from the fork's branch ("owner:branch") into upstream master.
params = {
    "title": "[Proxy] {message}".format(message=file.logs),
    "body": "",
    "head": "{origin}:{branch}".format(origin=self.origin.split("/")[0], branch=file.branch),
    "base": self.master_upstream
}
data = self.request("POST", uri, data=params)
if data.status_code == 201:
    return json.loads(data.content.decode("utf-8"))["html_url"]
else:
    # NOTE(review): passes reply["message"] directly, unlike the
    # (dict, "message") tuple used by sibling methods — confirm intended.
    reply = json.loads(data.content.decode("utf-8"))
    return self.ProxyError(
        data.status_code, reply["message"],
        step="pull_request", context={
            "uri": uri,
            "params": params
        }
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ref(self, branch, origin=None):
""" Check if a reference exists :param branch: The branch to check if it exists :return: Sha of the branch if it exists, False if it does not exist, self.ProxyError if it went wrong """ |
# Default to the fork (origin) repository unless told otherwise.
if not origin:
    origin = self.origin
uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format(
    api=self.github_api_url,
    origin=origin,
    branch=branch
)
data = self.request("GET", uri)
if data.status_code == 200:
    data = json.loads(data.content.decode("utf-8"))
    if isinstance(data, list):
        # The refs API returns a list when there is no exact match
        # (prefix search results starting with {branch}): treat as absent.
        return False
    # Otherwise, we get one record
    return data["object"]["sha"]
elif data.status_code == 404:
    return False
else:
    decoded_data = json.loads(data.content.decode("utf-8"))
    return self.ProxyError(
        data.status_code, (decoded_data, "message"),
        step="get_ref", context={
            "uri": uri
        }
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_ref(self, branch):
""" Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError """ |
# New branches are cut from the upstream master's current head.
master_sha = self.get_ref(self.master_upstream)
if not isinstance(master_sha, str):
    # get_ref returned False (missing) or a ProxyError (API failure).
    return self.ProxyError(
        404,
        "The default branch from which to checkout is either not available or does not exist",
        step="make_ref"
    )
params = {
    "ref": "refs/heads/{branch}".format(branch=branch),
    "sha": master_sha
}
uri = "{api}/repos/{origin}/git/refs".format(
    api=self.github_api_url,
    origin=self.origin
)
data = self.request("POST", uri, data=params)
if data.status_code == 201:
    data = json.loads(data.content.decode("utf-8"))
    return data["object"]["sha"]
else:
    decoded_data = json.loads(data.content.decode("utf-8"))
    return self.ProxyError(
        data.status_code, (decoded_data, "message"),
        step="make_ref", context={
            "uri": uri,
            "params": params
        }
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_sha(self, sha, content):
""" Check sent sha against the salted hash of the content :param sha: SHA sent through fproxy-secure-hash header :param content: Base 64 encoded Content :return: Boolean indicating equality """ |
rightful_sha = sha256(bytes("{}{}".format(content, self.secret), "utf-8")).hexdigest()
return sha == rightful_sha |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch_ref(self, sha):
""" Patch reference on the origin master branch :param sha: Sha to use for the branch :return: Status of success :rtype: str or self.ProxyError """ |
uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format(
    api=self.github_api_url,
    origin=self.origin,
    branch=self.master_fork
)
# force=True moves the fork's master even on a non-fast-forward update.
data = {
    "sha": sha,
    "force": True
}
reply = self.request(
    "PATCH",
    uri,
    data=data
)
if reply.status_code == 200:
    dic = json.loads(reply.content.decode("utf-8"))
    return dic["object"]["sha"]
else:
    dic = json.loads(reply.content.decode("utf-8"))
    return self.ProxyError(
        reply.status_code,
        (dic, "message"),
        step="patch",
        context={
            "uri": uri,
            "data": data
        }
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def r_receive(self, filename):
""" Function which receives the data from Perseids - Check the branch does not exist - Make the branch if needed - Receive PUT from Perseids - Check if content exist - Update/Create content - Open Pull Request - Return PR link to Perseids It can take a "branch" URI parameter for the name of the branch :param filename: Path for the file :return: JSON Response with status_code 201 if successful. """ |
###########################################
# Retrieving data
###########################################
content = request.data.decode("utf-8")
# Content checking
if not content:
    error = self.ProxyError(300, "Content is missing")
    return error.response()
# Author/date/log metadata default from configuration when absent.
author_name = request.args.get("author_name", self.default_author.name)
author_email = request.args.get("author_email", self.default_author.email)
author = Author(author_name, author_email)
date = request.args.get("date", datetime.datetime.now().date().isoformat())
logs = request.args.get("logs", "{} updated {}".format(author.name, filename))
self.logger.info("Receiving query from {}".format(author_name), extra={"IP": request.remote_addr})
###########################################
# Checking data security
###########################################
# Reject the request unless the salted hash header matches the content.
secure_sha = None
if "fproxy-secure-hash" in request.headers:
    secure_sha = request.headers["fproxy-secure-hash"]
if not secure_sha or not self.check_sha(secure_sha, content):
    error = self.ProxyError(300, "Hash does not correspond with content")
    return error.response()
###########################################
# Setting up data
###########################################
file = File(
    path=filename,
    content=content,
    author=author,
    date=date,
    logs=logs
)
file.branch = request.args.get("branch", self.default_branch(file))
###########################################
# Ensuring branch exists
###########################################
branch_status = self.get_ref(file.branch)
if isinstance(branch_status, self.ProxyError):  # If we have an error from github API
    return branch_status.response()
elif not branch_status:  # If it does not exist
    # We create a branch
    branch_status = self.make_ref(file.branch)
    # If branch creation did not work
    if isinstance(branch_status, self.ProxyError):
        return branch_status.response()
###########################################
# Pushing files
###########################################
# Check if file exists
# It feeds file.blob parameter, which tells us the sha of the file if it exists
file = self.get(file)
if isinstance(file, self.ProxyError):  # If we have an error from github API
    return file.response()
# If it has a blob set up, it means we can update given file
if file.blob:
    file = self.update(file)
# Otherwise, we create it
else:
    file = self.put(file)
if isinstance(file, self.ProxyError):
    return file.response()
###########################################
# Making pull request
###########################################
pr_url = self.pull_request(file)
if isinstance(pr_url, self.ProxyError):
    return pr_url.response()
reply = {
    "status": "success",
    "message": "The workflow was well applied",
    "pr_url": pr_url
}
data = jsonify(reply)
data.status_code = 201
return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def r_update(self):
""" Updates a fork Master - Check the ref of the origin repository - Patch reference of fork repository - Return status to Perseids :return: JSON Response with status_code 201 if successful. """ |
# Getting Master Branch
upstream = self.get_ref(self.master_upstream, origin=self.upstream)
if isinstance(upstream, bool):
return (ProxyError(
404, "Upstream Master branch '{0}' does not exist".format(self.master_upstream),
step="get_upstream_ref"
)).response()
elif isinstance(upstream, self.ProxyError):
return upstream.response()
# Patching
new_sha = self.patch_ref(upstream)
if isinstance(new_sha, self.ProxyError):
return new_sha.response()
self.logger.info("Updated repository {} to sha {}".format(self.origin, new_sha), extra={"former_sha": upstream})
return jsonify({
"status": "success",
"commit": new_sha
}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_where_user_id(cls, user_id):
""" delete by user id """ |
result = cls.where_user_id(user_id)
if result is None:
return None
result.delete()
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_filter(text):
"""Extract integer from text. **中文文档** 摘除文本内的整数。 """ |
res = list()
for char in text:
if char.isdigit():
res.append(char)
return int("".join(res)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def float_filter(text):
"""Extract float from text. **中文文档** 摘除文本内的小数。 """ |
res = list()
for char in text:
if (char.isdigit() or (char == ".")):
res.append(char)
return float("".join(res)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, filename, offset):
"""Will eventually load information for Apple_Boot volume. Not yet implemented""" |
# Placeholder: only records the offset; the actual Apple_Boot parsing
# (commented out below) is not yet implemented.
try:
    self.offset = offset
    # self.fd = open(filename, 'rb')
    # self.fd.close()
except IOError as e:
    # Only the commented-out open() could raise IOError; kept for when
    # the real implementation lands.
    print(e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve(accessor: hexdi.core.clstype) -> __gentype__.T: """ shortcut for resolving from root container :param accessor: accessor for resolving object :return: resolved object of requested type """ |
# Delegate resolution to the process-wide root container.
return hexdi.core.get_root_container().resolve(accessor=accessor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind_type(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype, lifetime_manager: hexdi.core.ltype):
""" shortcut for bind_type on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object :param lifetime_manager: type of lifetime manager for this binding """ |
# Register the binding on the process-wide root container.
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime_manager)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind_permanent(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype):
""" shortcut for bind_type with PermanentLifeTimeManager on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object """ |
# Singleton-style binding: one instance for the container's lifetime.
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PermanentLifeTimeManager)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind_transient(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype):
""" shortcut for bind_type with PerResolveLifeTimeManager on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object """ |
# Transient binding: a fresh instance is produced on every resolve.
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PerResolveLifeTimeManager)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_series(self, series):
""" Returns a census series API handler. """ |
if series == "acs1":
return self.census.acs1dp
elif series == "acs5":
return self.census.acs5
elif series == "sf1":
return self.census.sf1
elif series == "sf3":
return self.census.sf3
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_system_repository(self, repository_type, reset_on_start, repository_class=None):
""" Sets up the system repository with the given repository type. :param str repository_type: Repository type to use for the SYSTEM repository. :param bool reset_on_start: Flag to indicate whether stored system resources should be discarded on startup. :param repository_class: class to use for the system repository. If not given, the registered class for the given type will be used. """ |
# Set up the system entity repository (this does not join the
# transaction and is in autocommit mode).
cnf = dict(messaging_enable=True,
           messaging_reset_on_start=reset_on_start)
system_repo = self.new(repository_type,
                       name=REPOSITORY_DOMAINS.SYSTEM,
                       repository_class=repository_class,
                       configuration=cnf)
# Register the freshly built repository with this manager.
self.set(system_repo)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize_all(self):
""" Convenience method to initialize all repositories that have not been initialized yet. """ |
# Walk every registered repository and initialize the ones that have
# not been set up yet; already-initialized ones are left untouched.
for repository in itervalues_(self.__repositories):
    if not repository.is_initialized:
        repository.initialize()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def file(location, mime_type=None, headers=None, _range=None):
    '''Return a response object with file data.

    :param location: Location of file on system.
    :param mime_type: Specific mime_type.
    :param headers: Custom Headers.
    :param _range: Optional object with start/end/size/total attributes
        describing the byte range to serve.
    '''
    filename = path.split(location)[-1]
    async with open_async(location, mode='rb') as _file:
        if _range:
            # BUGFIX: headers defaults to None, so setting Content-Range
            # on it raised TypeError whenever a range was requested
            # without custom headers.
            if headers is None:
                headers = {}
            await _file.seek(_range.start)
            out_stream = await _file.read(_range.size)
            headers['Content-Range'] = 'bytes %s-%s/%s' % (
                _range.start, _range.end, _range.total)
        else:
            out_stream = await _file.read()
    # Fall back to guessing from the filename, then to text/plain.
    mime_type = mime_type or guess_type(filename)[0] or 'text/plain'
    return HTTPResponse(status=200,
                        headers=headers,
                        content_type=mime_type,
                        body_bytes=out_stream)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bundles():
""" Used to cache the bundle definitions rather than loading from config every time they're used """ |
# Build the BundleManager once and memoize it at module level; later
# calls reuse the cached instance instead of re-reading settings.
global _cached_bundles
if not _cached_bundles:
    _cached_bundles = BundleManager()
    for bundle_conf in bundles_settings.BUNDLES:
        # First element of each config tuple is the bundle key.
        _cached_bundles[bundle_conf[0]] = Bundle(bundle_conf)
return _cached_bundles
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bundle_versions():
""" Used to cache the bundle versions rather than loading them from the bundle versions file every time they're used """ |
global _cached_versions
# No versions file configured: behave as if there were no versions.
# NOTE(review): this branch resets the cache on every call — confirm
# that is intended rather than caching the empty dict once.
if not bundles_settings.BUNDLES_VERSION_FILE:
    _cached_versions = {}
if _cached_versions is None:
    locs = {}
    try:
        # execfile is Python 2 only; the versions file is expected to
        # define a BUNDLES_VERSIONS dict.
        execfile(bundles_settings.BUNDLES_VERSION_FILE, locs)
        _cached_versions = locs['BUNDLES_VERSIONS']
    except IOError:
        # Missing/unreadable file degrades to "no versions".
        _cached_versions = {}
return _cached_versions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_url(self, version=None):
""" Return the filename of the bundled bundle """ |
if self.fixed_bundle_url:
return self.fixed_bundle_url
return '%s.%s.%s' % (os.path.join(self.bundle_url_root, self.bundle_filename), version or self.get_version(), self.bundle_type) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_file_urls(self):
""" Return a list of file urls - will return a single item if settings.USE_BUNDLES is True """ |
if self.use_bundle:
return [self.get_url()]
return [bundle_file.file_url for bundle_file in self.files] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def export_batch(self):
"""Returns a batch instance after exporting a batch of txs. """ |
batch = self.batch_cls(
    model=self.model, history_model=self.history_model, using=self.using
)
# Only produce a file when the batch actually picked up transactions;
# an empty batch returns None and writes nothing.
if batch.items:
    try:
        json_file = self.json_file_cls(batch=batch, path=self.path)
        json_file.write()
    except JSONDumpFileError as e:
        # Re-raise under the exporter's own error type for callers.
        raise TransactionExporterError(e)
    batch.close()
    return batch
return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_key(self, key):
""" Ensure key is either in schema's attributes or already set on self. """ |
# Make sure the schema is built, then accept keys that are either
# declared attributes or already set on this instance.
self.setup_schema()
known = key in self._attrs or key in self
if not known:
    raise KeyError(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hirise_edr(self, pid, chunk_size=1024*1024):
""" Download a HiRISE EDR set of .IMG files to the CWD You must know the full id to specify the filter to use, ie: PSP_XXXXXX_YYYY will download every EDR IMG file available PSP_XXXXXX_YYYY_R will download every EDR RED filter IMG file PSP_XXXXXX_YYYY_BG12_0 will download only the BG12_0 As a wild card is auto applied to the end of the provided pid pid: product ID of the CTX EDR, partial IDs ok chunk_size: Chunk size in bytes to use in download """ |
# Wildcard suffix so partial product IDs match every derived product.
productid = "{}*".format(pid)
query = {"target"    : "mars",
         "query"     : "product",
         "results"   : "f",
         "output"    : "j",
         "pt"        : "EDR",
         "iid"       : "HiRISE",
         "ihid"      : "MRO",
         "productid" : productid}
# Query the ODE
products = query_ode(self.ode_url, query)
# Validate query results with conditions for this particular query
if len(products) > 30:
    print("Error: Too many products selected for in query, Make PID more specific")
    sys.exit(1)
if not isinstance(products, list):
    # NOTE(review): this branch only prints and then falls through to
    # the end without downloading — confirm it should not exit/raise.
    print("Error: Too few responses from server to be a full HiRISE EDR, ")
else:
    # proceed to download
    for product in products:
        download_edr_img_files(product, self.https, chunk_size)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect(self, filename, offset, standalone=False):
"""Verifies NTFS filesystem signature. Returns: bool: True if filesystem signature at offset 0x03 \ matches 'NTFS ', False otherwise. """ |
# Read the OEM ID field of the boot sector (offset SIG_OFFSET,
# SIG_SIZE bytes) and compare it against the NTFS signature.
r = RawStruct(
    filename=filename,
    offset=offset + SIG_OFFSET,
    length=SIG_SIZE)
oem_id = r.data
# NOTE(review): the signature literal's trailing padding must match
# SIG_SIZE exactly — verify against the on-disk OEM field width.
if oem_id == b"NTFS ":
    return True
return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cls, v):
"""Load the action from configuration""" |
if v is None:
return []
if isinstance(v, list):
return [ Action(s) for s in v ]
elif isinstance(v, str):
return [Action(v)]
else:
raise ParseError("Couldn't parse action: %r" % v) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_stream(cls, st):
"""Load Automatons from a stream""" |
# NOTE(review): yaml.load without an explicit Loader can construct
# arbitrary Python objects; use yaml.safe_load if the stream is not
# fully trusted.
y = yaml.load(st)
# dict.iteritems() marks this as Python 2 code.
return [ Automaton(k, v) for k, v in y.iteritems() ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_dot(self, filename_or_stream, auts):
"""Create a graphviz .dot representation of the automaton.""" |
# Accept either a filename or an already-open stream.
# NOTE(review): file() is the Python 2 builtin; open() under Python 3.
if isinstance(filename_or_stream, str):
    stream = file(filename_or_stream, 'w')
else:
    stream = filename_or_stream
dot = DotFile(stream)
for aut in auts:
    dot.start(aut.name)
    dot.node('shape=Mrecord width=1.5')
    # One Mrecord node per state: name plus optional entering/leaving
    # action compartments, joined with graphviz left-aligned breaks.
    for st in aut.states:
        label = st.name
        if st.entering:
            label += '|%s' % '\\l'.join(str(st) for st in st.entering)
        if st.leaving:
            label += '|%s' % '\\l'.join(str(st) for st in st.leaving)
        label = '{%s}' % label
        dot.state(st.name, label=label)
    # Edges labeled with the transition's trigger condition.
    for st in aut.states:
        for tr in st.transitions:
            dot.transition(tr.s_from.name, tr.s_to.name, tr.when)
    dot.end()
dot.finish()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, url):
"""Create a bucket, directory, or empty file.""" |
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
return self.call("CreateBucket", bucket=target) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy(self, url, recursive=False):
"""Destroy a bucket, directory, or file. Specifying recursive=True recursively deletes all subdirectories and files.""" |
bucket, obj_key = _parse_url(url)
if not bucket:
    raise InvalidURL(url,
                     "You must specify a bucket and (optional) path")
# A key makes the target a path inside the bucket; otherwise the
# bucket itself is the target.
if obj_key:
    target = "/".join((bucket, obj_key))
else:
    target = bucket
# Recursive mode first destroys every object listed under the URL
# (empty delimiter lists the whole subtree), then the target itself.
if recursive:
    for obj in self.get(url, delimiter=''):
        self.destroy(obj['url'])
return self.call("DeleteBucket", bucket=target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload(self, local_path, remote_url):
"""Copy a local file to an S3 location.""" |
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(self, remote_url, local_path, buffer_size=8 * 1024):
"""Copy S3 data to a local file.""" |
# Stream the object body to disk in fixed-size chunks to bound memory.
bucket, key = _parse_url(remote_url)
body = self.call("GetObject", bucket=bucket, key=key)['Body']
with open(local_path, 'wb') as out:
    while True:
        chunk = body.read(buffer_size)
        if not chunk:
            break
        out.write(chunk)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self, src_url, dst_url):
"""Copy an S3 object to another S3 location.""" |
src_bucket, src_key = _parse_url(src_url)
dst_bucket, dst_key = _parse_url(dst_url)
if not dst_bucket:
dst_bucket = src_bucket
params = {
'copy_source': '/'.join((src_bucket, src_key)),
'bucket': dst_bucket,
'key': dst_key,
}
return self.call("CopyObject", **params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move(self, src_url, dst_url):
"""Copy a single S3 object to another S3 location, then delete the original object.""" |
# Copy first, then delete the source. Not atomic: a failure between the
# two calls leaves both objects in place.
self.copy(src_url, dst_url)
self.destroy(src_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_shard_names(self):
""" get_shard_names returns an array containing the names of the shards in the cluster. This is determined with num_shards and shard_name_format """ |
results = []
for shard_num in range(0, self.num_shards()):
shard_name = self.get_shard_name(shard_num)
results.append(shard_name)
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_canonical_key_id(self, key_id):
""" get_canonical_key_id is used by get_canonical_key, see the comment for that method for more explanation. Keyword arguments: key_id -- the key id (e.g. '12345') returns the canonical key id (e.g. '12') """ |
# Map the key id to its shard, then return that shard's precomputed
# canonical key id.
shard_num = self.get_shard_num_by_key_id(key_id)
return self._canonical_keys[shard_num]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_shard_by_num(self, shard_num):
""" get_shard_by_num returns the shard at index shard_num. Keyword arguments: shard_num -- The shard index Returns a redis.StrictRedis connection or raises a ValueError. """ |
if shard_num < 0 or shard_num >= self.num_shards():
raise ValueError("requested invalid shard# {0}".format(shard_num))
return self._shards[shard_num] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_key_id_from_key(self, key):
""" _get_key_id_from_key returns the key id from a key, if found. otherwise it just returns the key to be used as the key id. Keyword arguments: key -- The key to derive the ID from. If curly braces are found in the key, then the contents of the curly braces are used as the key id for the key. Returns the key id portion of the key, or the whole key if no hash tags are present. """ |
# Default: the whole key doubles as the key id.
key_id = key
# Match the first hash-tag span, e.g. {id} -> "id" with the default
# start/stop characters.
regex = '{0}([^{1}]*){2}'.format(self._hash_start, self._hash_stop,
                                 self._hash_stop)
m = re.search(regex, key)
if m is not None:
    # Use what's inside the hash tags as the key id, if present.
    # Otherwise the whole key will be used as the key id.
    key_id = m.group(1)
return key_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_canonical_key_ids(self, search_amplifier=100):
""" A canonical key id is the lowest integer key id that maps to a particular shard. The mapping to canonical key ids depends on the number of shards. Returns a dictionary mapping from shard number to canonical key id. This method will throw an exception if it fails to compute all of the canonical key ids. """ |
canonical_keys = {}
num_shards = self.num_shards()
# Guarantees enough to find all keys without running forever
num_iterations = (num_shards**2) * search_amplifier
for key_id in range(1, num_iterations):
shard_num = self.get_shard_num_by_key(str(key_id))
if shard_num in canonical_keys:
continue
canonical_keys[shard_num] = str(key_id)
if len(canonical_keys) == num_shards:
break
if len(canonical_keys) != num_shards:
raise ValueError("Failed to compute enough keys. " +
"Wanted %d, got %d (search_amp=%d).".format(
num_shards, len(canonical_keys),
search_amplifier))
return canonical_keys |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keys(self, args):
""" keys wrapper that queries every shard. This is an expensive operation. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """ |
results = {}
# TODO: parallelize
for shard_num in range(0, self.num_shards()):
shard = self.get_shard_by_num(shard_num)
results[shard_num] = shard.keys(args)
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mget(self, args):
""" mget wrapper that batches keys per shard and execute as few mgets as necessary to fetch the keys from all the shards involved. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """ |
# Group the requested keys by owning shard so each shard receives a
# single MGET for its subset.
key_map = collections.defaultdict(list)
results = {}
for key in args:
    shard_num = self.get_shard_num_by_key(key)
    key_map[shard_num].append(key)
# TODO: parallelize
for shard_num in key_map.keys():
    shard = self.get_shard_by_num(shard_num)
    results[shard_num] = shard.mget(key_map[shard_num])
return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mset(self, args):
""" mset wrapper that batches keys per shard and execute as few msets as necessary to set the keys in all the shards involved. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """ |
# Group the key/value pairs by owning shard so each shard receives a
# single MSET for its subset; return the summed per-shard results.
key_map = collections.defaultdict(dict)
result_count = 0
for key in args.keys():
    value = args[key]
    shard_num = self.get_shard_num_by_key(key)
    key_map[shard_num][key] = value
# TODO: parallelize
for shard_num in key_map.keys():
    shard = self.get_shard_by_num(shard_num)
    result_count += shard.mset(key_map[shard_num])
return result_count
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def id_generator(start=0):
""" Generator for sequential numeric numbers. """ |
count = start
while True:
send_value = (yield count)
if not send_value is None:
if send_value < count:
raise ValueError('Values from ID generator must increase '
'monotonically (current value: %d; value '
'sent to generator: %d).'
% (count, send_value))
count = send_value
else:
count += 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generative(func):
""" Marks an instance method as generative. """ |
def wrap(inst, *args, **kw):
clone = type(inst).__new__(type(inst))
clone.__dict__ = inst.__dict__.copy()
return func(clone, *args, **kw)
return update_wrapper(wrap, func) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def truncate(message, limit=500):
""" Truncates the message to the given limit length. The beginning and the end of the message are left untouched. """ |
if len(message) > limit:
trc_msg = ''.join([message[:limit // 2 - 2],
' .. ',
message[len(message) - limit // 2 + 2:]])
else:
trc_msg = message
return trc_msg |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_board(board_id):
"""remove board. :param board_id: board id (e.g. 'diecimila') :rtype: None """ |
log.debug('remove %s', board_id)
# Drop every line belonging to this board, i.e. lines whose stripped
# text begins with '<board_id>.', then rewrite boards.txt.
prefix = board_id + '.'
remaining = [line for line in boards_txt().lines()
             if not line.strip().startswith(prefix)]
boards_txt().write_lines(remaining)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_route(self, route) -> dict:
""" Construct a route to be parsed into flask App """ |
middleware = route['middleware'] if 'middleware' in route else None
# added to ALL requests to support xhr cross-site requests
route['methods'].append('OPTIONS')
return {
'url': route['url'],
'name': route['name'],
'methods': route['methods'],
'middleware': middleware,
'callback': {
'module': route['function'].__module__,
'class': route['function'].__qualname__.rsplit('.', 1)[0],
'function': route['function'].__name__
}
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diffusion_driver(self):
""" diffusion driver are the underlying `dW` of each process `X` in a SDE like `dX = m dt + s dW` :return list(StochasticProcess):
""" |
if self._diffusion_driver is None:
return self,
if isinstance(self._diffusion_driver, list):
return tuple(self._diffusion_driver)
if isinstance(self._diffusion_driver, tuple):
return self._diffusion_driver
return self._diffusion_driver, |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset_codenames(self, dry_run=None, clear_existing=None):
"""Ensures all historical model codenames exist in Django's Permission model. """ |
self.created_codenames = []
self.updated_names = []
# Base permission actions. "view" only exists from Django 2.1 on, so it
# is appended conditionally below (previously it appeared in both the
# base list and the conditional append, duplicating it).
actions = ["add", "change", "delete"]
if django.VERSION >= (2, 1):
    actions.append("view")
for app in django_apps.get_app_configs():
    for model in app.get_models():
        try:
            # Only models tracked by django-simple-history carry this
            # manager attribute; everything else is skipped.
            getattr(model, model._meta.simple_history_manager_attribute)
        except AttributeError:
            pass
        else:
            self.update_or_create(
                model, dry_run=dry_run, clear_existing=clear_existing
            )
if dry_run:
    print("This is a dry-run. No modifications were made.")
if self.created_codenames:
    # (fixed typo: "were be added" -> "were added")
    print("The following historical permission.codenames were added:")
    pprint(self.created_codenames)
else:
    print("No historical permission.codenames were added.")
if self.updated_names:
    print("The following historical permission.names were updated:")
    pprint(self.updated_names)
else:
    print("No historical permission.names were updated.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.